#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import hashlib
import htpc
import imghdr
import logging
from cherrypy.lib.static import serve_file
from urllib2 import Request, urlopen
import urllib
import time
from functools import wraps
from operator import itemgetter
import itertools
from mako import exceptions
from mako.lookup import TemplateLookup
import requests
import workerpool
try:
import Image
PIL = True
except ImportError:
try:
from PIL import Image
PIL = True
except ImportError:
PIL = False
logger = logging.getLogger('htpc.helpers')
def timeit_func(func):
@wraps(func)
def inner(*args, **kwargs):
start = time.time()
res = func(*args, **kwargs)
logger.debug('%s took %s' % (func.__name__, time.time() - start))
return res
return inner
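# Illustrative use of the decorator above (a minimal sketch; the function name
# is a placeholder):
#
#   @timeit_func
#   def fetch_posters():
#       ...
#
# Calling fetch_posters() then logs a line such as "fetch_posters took 0.42"
# at debug level.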
comp_table = {
'==': lambda x, y: x == y,
'!=': lambda x, y: x != y,
'>': lambda x, y: x > y,
'<': lambda x, y: x < y,
'>=': lambda x, y: x >= y,
'<=': lambda x, y: x <= y,
}
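# Illustrative use of comp_table (a sketch; the values are made up): it maps an
# operator given as a string to the matching comparison function, e.g. when a
# filter operator comes from user configuration.
#
#   comp_table['>='](1080, 720)   # -> True
#   comp_table['!='](5, 5)        # -> False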
def get_image(url, height=None, width=None, opacity=100, mode=None, auth=None, headers=None, missing_image=None):
''' Load image from cache if possible, else download. Resize if needed '''
opacity = float(opacity)
logger = logging.getLogger('htpc.helpers')
# Create image directory if it doesn't exist
imgdir = os.path.join(htpc.DATADIR, 'images/')
if not os.path.exists(imgdir):
logger.debug('Creating image directory at ' + imgdir)
os.makedirs(imgdir)
# Create a hash of the path to use as filename
imghash = hashlib.md5(url).hexdigest()
# Set filename and path
image = os.path.join(imgdir, imghash)
# If there is no local copy of the original
# download it
if not os.path.isfile(image):
logger.debug('No local image found for %s. Downloading..' % url)
image = download_image(url, image, auth, headers)
# Check if resize is needed
if (height and width) or (opacity < 100) or mode:
if PIL:
# Set a filename for resized file
resized = '%s_w%s_h%s_o_%s_%s' % (image, width, height, opacity, mode)
# If there is no local resized copy
if not os.path.isfile(resized):
# try to resize; if we can't, return the original image
image = resize_image(image, height, width, opacity, mode, resized)
if image:
return serve_file(path=image, content_type='image/jpeg')
# If the resized image is already cached
if os.path.isfile(resized):
image = resized
else:
logger.error("Can't resize when PIL is missing on system!")
if (opacity < 100):
image = os.path.join(htpc.RUNDIR, 'interfaces/default/img/fff_20.png')
# Load file from disk
if image is not None:
imagetype = imghdr.what(os.path.abspath(image))
if imagetype is None:
imagetype = 'image/jpeg'
return serve_file(path=image, content_type=imagetype)
if missing_image:
# full fp to missing image
return serve_file(path=missing_image, content_type='image/jpeg')
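# Illustrative call of get_image (a sketch; the URL is a placeholder):
#
#   get_image('http://example.com/poster.jpg', height=150, width=100)
#
# returns a cherrypy file response, serving a cached copy from the images
# directory under htpc.DATADIR and, when PIL is available, a resized variant.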
class CacheImgDownload(workerpool.Job):
"Job for downloading a given URL."
def __init__(self, item, headers):
self.url = item['url']
self.fp = item['fp']
self.resize = item['resize']
self.headers = headers
self.item = item
def run(self):
try:
if self.resize:
for i in self.resize:
if len(i) > 3:
r = requests.get(i[2], headers=self.headers)
with open(i[3], 'wb') as local_file:
local_file.write(r.content)
# Download original image
r = requests.get(self.url, headers=self.headers)
with open(self.fp, 'wb') as local_file:
local_file.write(r.content)
except Exception as e:
logger.debug('Failed to cache image %s' % e)
def cache_resize_image(item):
#imglist = [{'hash': '123', 'url': 'xxx', 'fp': 'filepath', 'resize': [(w, h), (w, h)]}]
fp = item['fp']
imagetype = imghdr.what(fp)
if imagetype:
# Open original image
im = Image.open(fp)
if 'resize' in item:
for r in item['resize']:
im = im.resize(r, Image.ANTIALIAS)
resized = '%s_w%s_h%s_o_%s_%s' % (fp, r[0], r[1], None, None)
if imagetype.lower() in ('jpeg', 'jpg'):
im.save(resized, 'JPEG', quality=95)
else:
im.save(resized, imagetype)
@timeit_func
def cachedprime(urls, headers={}, resize=False, plex_resize=False):
'''
{'hash': '1dad1d1', fp': 'filepath', 'url': 'imgurl', 'resize': [[w, h, url, dest]}
'''
logger.debug('Got %s images' % len(urls))
urls = remove_dict_dupe_from_list(urls, 'hash')
logger.debug('Removed all duplicate images, got %s left' % len(urls))
imgdir = os.path.join(htpc.DATADIR, 'images/')
made_dir = False
if not os.path.exists(imgdir):
logger.debug('Creating image directory at %s' % imgdir)
os.makedirs(imgdir)
made_dir = True
resize_list = []
logger.debug('This can take a while..')
# If there is no local copy of the original
if made_dir is True:
logger.debug('There was no image directory, so everything is missing')
resize_list = urls
else:
logger.debug('Checking for missing images')
# cba with resizes for plex
for item in urls:
if not os.path.isfile(item['fp']):
logger.debug('%s was missing, download it %s' % (item['fp'], item['url']))
resize_list.append(item)
if made_dir is False and not resize_list:
logger.debug('No missing images :)')
return
pool = workerpool.WorkerPool(size=20)
for i in resize_list:
j = CacheImgDownload(i, headers)
pool.put(j)
pool.shutdown()
pool.wait()
# use pil to resize images
if resize_list and plex_resize is False and resize is True:
from multiprocessing import Pool, cpu_count
ppool = Pool(cpu_count())
try:
ppool.map_async(cache_resize_image, (b for b in resize_list), 5)
ppool.close()
ppool.join()
except Exception as e:
logger.debug('Failed to resize image %s' % e)
else:
# Already downloaded transcoded images
return
def download_image(url, dest, auth=None, headers=None):
''' Download image and save to disk '''
logger = logging.getLogger('htpc.helpers')
logger.debug('Downloading image from %s to %s' % (url, dest))
try:
request = Request(url)
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if headers:
for key, value in headers.iteritems():
request.add_header(key, value)
# Sonarr's image API returns 304, but it can't know if a user has cleared the cache
# So make sure we get data every time.
request.add_header('Cache-Control', 'private, max-age=0, no-cache, must-revalidate')
request.add_header('Pragma', 'no-cache')
resp = urlopen(request).read()
if resp:
with open(dest, 'wb') as local_file:
local_file.write(resp)
else:
return
return dest
except Exception as e:
logger.error('Failed to download %s to %s %s' % (url, dest, e))
def resize_image(img, height, width, opacity, mode, dest):
''' Resize image, set opacity and save to disk '''
try:
imagetype = imghdr.what(img)
im = Image.open(img)
except IOError:
logger.error('Failed to open image %s dest %s' % (img, dest))
return
# Only resize if needed
if height is not None and width is not None:
size = int(width), int(height)
im = im.resize(size, Image.ANTIALIAS)
# Apply overlay if opacity is set
if (opacity < 100):
enhance = opacity / 100
# Create white overlay image
overlay = Image.new('RGB', im.size, '#FFFFFF')
# apply overlay to resized image
im = Image.blend(overlay, im, enhance)
# See http://effbot.org/imagingbook/concepts.htm
# for the different modes
if mode:
im = im.convert(str(mode))
if imagetype.lower() in ('jpeg', 'jpg'):
im.save(dest, 'JPEG', quality=95)
else:
im.save(dest, imagetype)
return dest
def fix_basepath(s):
''' Removes whitespace and adds / on each end '''
if s:
s = s.strip()
s = s.rstrip('/')
s = s.lstrip('/')
if not s.startswith('/'):
s = '/' + s
if not s.endswith('/'):
s += '/'
return s
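# Illustrative behaviour of fix_basepath (a sketch):
#
#   fix_basepath(' htpc ')   # -> '/htpc/'
#   fix_basepath('/htpc')    # -> '/htpc/'
#   fix_basepath('/htpc/')   # -> '/htpc/'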
def striphttp(s):
# hate regex and this was faster
if s:
s = s.strip(' ')
s = s.replace('https://', '')
s = s.replace('http://', '')
return s
else:
return ''
def remove_dict_dupe_from_list(l, key):
getvals = itemgetter(key)
l.sort(key=getvals)
result = []
for k, g in itertools.groupby(l, getvals):
result.append(g.next())
return result
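# Illustrative behaviour of remove_dict_dupe_from_list (a sketch): the list is
# sorted on the given key and only the first dict of each group is kept, so
#
#   imgs = [{'hash': 'a', 'url': 'x'}, {'hash': 'a', 'url': 'y'}, {'hash': 'b', 'url': 'z'}]
#   remove_dict_dupe_from_list(imgs, 'hash')   # -> two items, hashes 'a' and 'b'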
def create_https_certificates(ssl_cert, ssl_key):
'''
Create self-signed HTTPS certificates and store them in paths 'ssl_cert' and 'ssl_key'
'''
try:
from OpenSSL import crypto
from certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, serial
except Exception as e:
logger.error(e)
logger.error('You need pyopenssl and OpenSSL to make a cert')
return False
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 2048)
careq = createCertRequest(cakey, CN='Certificate Authority')
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
cname = 'Htpc-Manager'
pkey = createKeyPair(TYPE_RSA, 2048)
req = createCertRequest(pkey, CN=cname)
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:
open(ssl_key, 'w').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
open(ssl_cert, 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
except Exception as e:
logger.error('Error creating SSL key and certificate %s' % e)
return False
return True
def joinArgs(args):
''' stolen from plexapi '''
if not args:
return ''
arglist = []
for key in sorted(args, key=lambda x: x.lower()):
value = str(args[key])
arglist.append('%s=%s' % (key, urllib.quote(value)))
return '?%s' % '&'.join(arglist)
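# Illustrative behaviour of joinArgs (a sketch): keys are sorted
# case-insensitively and values are url-quoted, so
#
#   joinArgs({'type': 1, 'X-Plex-Token': 'abc'})   # -> '?type=1&X-Plex-Token=abc'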
def sizeof(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return '%3.2f %s' % (num, x)
num /= 1024.0
return '%3.2f %s' % (num, 'PB')
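# Illustrative behaviour of sizeof (a sketch):
#
#   sizeof(500)          # -> '500.00 bytes'
#   sizeof(1536)         # -> '1.50 KB'
#   sizeof(3 * 1024**3)  # -> '3.00 GB'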
def serve_template(name, **kwargs):
try:
loc = os.path.join(htpc.RUNDIR, 'interfaces/',
htpc.settings.get('app_template', 'default'))
template = TemplateLookup(directories=[os.path.join(loc, 'html/')])
return template.get_template(name).render(**kwargs)
except Exception as e:
logger.error('%s' % exceptions.text_error_template().render())
if htpc.DEV or htpc.LOGLEVEL == 'debug':
return exceptions.html_error_template().render()
# Copyright (c) 2017, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from cmath import polar
from yambopy.units import *
from itertools import product
class YamboExcitonDB(YamboSaveDB):
""" Read the excitonic states database from yambo
"""
def __init__(self,lattice,filename='ndb.BS_diago_Q01',path='.'):
self.lattice = lattice
self.filename = filename
self.path = path
self.get_database()
def get_database(self):
""" Load the diago database to memory
"""
try:
filename = "%s/%s"%(self.path,self.filename)
db = Dataset(filename)
except:
print "failed to read database %s"%filename
exit(1)
if 'BS_left_Residuals' in db.variables.keys():
#residuals
rel,iml = db['BS_left_Residuals'][:].T
rer,imr = db['BS_right_Residuals'][:].T
self.l_residual = rel+iml*I
self.r_residual = rer+imr*I
if 'BS_Residuals' in db.variables.keys():
#residuals
rel,iml,rer,imr = db['BS_Residuals'][:].T
self.l_residual = rel+iml*I
self.r_residual = rer+imr*I
#energies
eig = db['BS_Energies'][:]*ha2ev
self.eigenvalues = eig[:,0]+eig[:,1]*I
#eigenvectors
eiv = db['BS_EIGENSTATES'][:]
eiv = eiv[:,:,0] + eiv[:,:,1]*I
self.eigenvectors = eiv
#indexes
self.table = db['BS_TABLE'][:].T.astype(int)
#transitions dictionary
#bs table k, v, c
self.unique_vbands = np.unique(self.table[:,1]-1)
self.unique_cbands = np.unique(self.table[:,2]-1)
#initialize empty dictionary
transitions_v_to_c = dict([ ((v,c),[]) for v,c in product(self.unique_vbands,self.unique_cbands) ])
#add elements to dictionary
for eh,kvc in enumerate(self.table-1):
k,v,c = kvc
transitions_v_to_c[(v,c)].append((k,eh))
#make an array
for t,v in transitions_v_to_c.items():
transitions_v_to_c[t] = np.array(v)
self.transitions_v_to_c = transitions_v_to_c
self.nexcitons = len(self.eigenvalues)
self.ntransitions = len(self.table)
db.close()
def exciton_bs(self,energies,path,excitons=(0,),debug=False):
"""
Calculate exciton band-structure
Arguments:
energies -> can be an instance of YamboSaveDB or YamboQPDB
path -> path in reduced coordinates in which to plot the band structure
exciton -> exciton index to plot
"""
if isinstance(excitons, int):
excitons = (excitons,)
#get full kmesh
kpoints = self.lattice.red_kpoints
path = np.array(path)
kpoints_rep, kpoints_idx_rep = replicate_red_kmesh(kpoints,repx=range(-1,2),repy=range(-1,2),repz=range(-1,2))
band_indexes = get_path(kpoints_rep,path)
band_kpoints = kpoints_rep[band_indexes]
band_indexes = kpoints_idx_rep[band_indexes]
if debug:
for i,k in zip(band_indexes,band_kpoints):
x,y,z = k
plt.text(x,y,i)
plt.scatter(kpoints_rep[:,0],kpoints_rep[:,1])
plt.plot(path[:,0],path[:,1],c='r')
plt.scatter(band_kpoints[:,0],band_kpoints[:,1])
plt.show()
exit()
#get eigenvalues along the path
if isinstance(energies,YamboSaveDB):
#expand eigenvalues to the full Brillouin zone
energies = energies.eigenvalues[self.lattice.kpoints_indexes]
elif isinstance(energies,YamboQPDB):
#expand the quasiparticle energies to the full Brillouin zone
energies = energies.eigenvalues_qp[self.lattice.kpoints_indexes]
else:
raise ValueError("argument 'variables' must be an object of YamboSaveDB or YamboQPDB")
#get weight of state in each band
weights = np.zeros(energies.shape)
for exciton in excitons:
#get the eigenstate
eivec = self.eigenvectors[exciton-1]
for t,transitions in self.transitions_v_to_c.items():
v,c = t
iks, ehs = transitions.T
weights[iks,c] += abs2(eivec[ehs])
weights[iks,v] += abs2(eivec[ehs])
energies = energies[band_indexes]
weights = weights[band_indexes]
return np.array(band_kpoints), energies, weights
def plot_exciton_bs(self,ax,energies,path,excitons,size=500,space='bands',
args_scatter={'c':'b'},args_plot={'c':'r'}):
"""
Plot the excitons
Arguments:
ax -> axis instance of matplotlib to add the plot to
"""
bands_kpoints, energies, weights = self.exciton_bs(energies, path, excitons)
weights /= np.max(weights)
#calculate distances
bands_distances = [0]
distance = 0
for nk in range(1,len(bands_kpoints)):
distance += np.linalg.norm(bands_kpoints[nk-1]-bands_kpoints[nk])
bands_distances.append(distance)
for v,c in product(self.unique_vbands,self.unique_cbands):
if space=='bands':
ax.plot(bands_distances, energies[:,c], **args_plot)
ax.plot(bands_distances, energies[:,v], **args_plot)
ax.scatter(bands_distances, energies[:,c], s=weights[:,c]*size, **args_scatter)
ax.scatter(bands_distances, energies[:,v], s=weights[:,v]*size, **args_scatter)
else:
ax.plot(bands_distances, energies[:,c]-energies[:,v], c='b')
ax.scatter(bands_distances, energies[:,c]-energies[:,v], s=weights[:,c]*size, c='r')
ax.set_title("exciton %d-%d"%(excitons[0],excitons[-1]))
def get_amplitudes_phases(self,excitons=(0,)):
""" get the excitonic amplitudes and phases
"""
if isinstance(excitons, int):
excitons = (excitons,)
car_kpoints = self.lattice.car_kpoints
nkpoints = len(car_kpoints)
amplitudes = np.zeros([nkpoints])
phases = np.zeros([nkpoints],dtype=np.complex64)
for exciton in excitons:
#get the eigenstate
eivec = self.eigenvectors[exciton]
total = 0
for eh,kvc in enumerate(self.table):
ikbz, v, c = kvc-1
Acvk = eivec[eh]
phases[ikbz] += Acvk
amplitudes[ikbz] += np.abs(Acvk)
return car_kpoints, amplitudes, np.angle(phases)
def chi(self,dipoles,dir=0,emin=0,emax=8,estep=0.01,broad=0.1,nexcitons='all'):
"""
Calculate the dielectric response function using excitonic states
"""
if nexcitons == 'all': nexcitons = self.nexcitons
#energy range
w = np.arange(emin,emax,estep,dtype=np.float32)
nenergies = len(w)
print "energy range: %lf -> +%lf -> %lf "%(emin,estep,emax)
print "energy steps: %lf"%nenergies
#initialize the susceptibility intensity
chi = np.zeros([len(w)],dtype=np.complex64)
#calculate exciton-light coupling
print "calculate exciton-light coupling"
EL1,EL2 = self.project1(dipoles.dipoles[:,dir],nexcitons)
#get dipole
#dip1 = self.l_residual
#dip2 = self.r_residual
#iterate over the excitonic states
for s in xrange(nexcitons):
#get exciton energy
es = self.eigenvalues[s]
#calculate the green's functions
G1 = 1/( w - es - broad*I)
G2 = 1/( - w - es - broad*I)
r = EL1[s]*EL2[s]
chi += r*G1 + r*G2
return w,chi
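# Note (illustrative): the loop above evaluates the usual excitonic
# susceptibility
#   chi(w) ~ sum_s R_s * [ 1/(w - E_s - i*eta) + 1/(-w - E_s - i*eta) ]
# where E_s are the exciton energies, eta ('broad') the broadening and
# R_s = EL1[s]*EL2[s] the exciton-light coupling residues.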
def __str__(self):
s = "number of excitons: %d\n"%self.nexcitons
s += "number of excitons: %d\n"%self.ntransitions
return s
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.http.server import request_handler, respond_with_json_bytes
from synapse.http.servlet import parse_integer
from synapse.api.errors import SynapseError, Codes
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from io import BytesIO
import json
import logging
logger = logging.getLogger(__name__)
class RemoteKey(Resource):
"""HTTP resource for retreiving the TLS certificate and NACL signature
verification keys for a collection of servers. Checks that the reported
X.509 TLS certificate matches the one used in the HTTPS connection. Checks
that the NACL signature for the remote server is valid. Returns a dict of
JSON signed by both the remote server and by this server.
Supports individual GET APIs and a bulk query POST API.
Requests:
GET /_matrix/key/v2/query/remote.server.example.com HTTP/1.1
GET /_matrix/key/v2/query/remote.server.example.com/a.key.id HTTP/1.1
POST /_matrix/v2/query HTTP/1.1
Content-Type: application/json
{
"server_keys": {
"remote.server.example.com": {
"a.key.id": {
"minimum_valid_until_ts": 1234567890123
}
}
}
}
Response:
HTTP/1.1 200 OK
Content-Type: application/json
{
"server_keys": [
{
"server_name": "remote.server.example.com"
"valid_until_ts": # posix timestamp
"verify_keys": {
"a.key.id": { # The identifier for a key.
key: "" # base64 encoded verification key.
}
}
"old_verify_keys": {
"an.old.key.id": { # The identifier for an old key.
key: "", # base64 encoded key
"expired_ts": 0, # when the key stop being used.
}
}
"tls_fingerprints": [
{ "sha256": # fingerprint }
]
"signatures": {
"remote.server.example.com": {...}
"this.server.example.com": {...}
}
}
]
}
"""
isLeaf = True
def __init__(self, hs):
self.keyring = hs.get_keyring()
self.store = hs.get_datastore()
self.version_string = hs.version_string
self.clock = hs.get_clock()
def render_GET(self, request):
self.async_render_GET(request)
return NOT_DONE_YET
@request_handler
@defer.inlineCallbacks
def async_render_GET(self, request):
if len(request.postpath) == 1:
server, = request.postpath
query = {server: {}}
elif len(request.postpath) == 2:
server, key_id = request.postpath
minimum_valid_until_ts = parse_integer(
request, "minimum_valid_until_ts"
)
arguments = {}
if minimum_valid_until_ts is not None:
arguments["minimum_valid_until_ts"] = minimum_valid_until_ts
query = {server: {key_id: arguments}}
else:
raise SynapseError(
404, "Not found %r" % request.postpath, Codes.NOT_FOUND
)
yield self.query_keys(request, query, query_remote_on_cache_miss=True)
def render_POST(self, request):
self.async_render_POST(request)
return NOT_DONE_YET
@request_handler
@defer.inlineCallbacks
def async_render_POST(self, request):
try:
content = json.loads(request.content.read())
if type(content) != dict:
raise ValueError()
except ValueError:
raise SynapseError(
400, "Content must be JSON object.", errcode=Codes.NOT_JSON
)
query = content["server_keys"]
yield self.query_keys(request, query, query_remote_on_cache_miss=True)
@defer.inlineCallbacks
def query_keys(self, request, query, query_remote_on_cache_miss=False):
logger.info("Handling query for keys %r", query)
store_queries = []
for server_name, key_ids in query.items():
if not key_ids:
key_ids = (None,)
for key_id in key_ids:
store_queries.append((server_name, key_id, None))
cached = yield self.store.get_server_keys_json(store_queries)
json_results = set()
time_now_ms = self.clock.time_msec()
cache_misses = dict()
for (server_name, key_id, from_server), results in cached.items():
results = [
(result["ts_added_ms"], result) for result in results
]
if not results and key_id is not None:
cache_misses.setdefault(server_name, set()).add(key_id)
continue
if key_id is not None:
ts_added_ms, most_recent_result = max(results)
ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
req_key = query.get(server_name, {}).get(key_id, {})
req_valid_until = req_key.get("minimum_valid_until_ts")
miss = False
if req_valid_until is not None:
if ts_valid_until_ms < req_valid_until:
logger.debug(
"Cached response for %r/%r is older than requested"
": valid_until (%r) < minimum_valid_until (%r)",
server_name, key_id,
ts_valid_until_ms, req_valid_until
)
miss = True
else:
logger.debug(
"Cached response for %r/%r is newer than requested"
": valid_until (%r) >= minimum_valid_until (%r)",
server_name, key_id,
ts_valid_until_ms, req_valid_until
)
elif (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms:
logger.debug(
"Cached response for %r/%r is too old"
": (added (%r) + valid_until (%r)) / 2 < now (%r)",
server_name, key_id,
ts_added_ms, ts_valid_until_ms, time_now_ms
)
# We are more than half way through the lifetime of the
# response. We should fetch a fresh copy.
miss = True
else:
logger.debug(
"Cached response for %r/%r is still valid"
": (added (%r) + valid_until (%r)) / 2 < now (%r)",
server_name, key_id,
ts_added_ms, ts_valid_until_ms, time_now_ms
)
if miss:
cache_misses.setdefault(server_name, set()).add(key_id)
json_results.add(bytes(most_recent_result["key_json"]))
else:
for ts_added, result in results:
json_results.add(bytes(result["key_json"]))
if cache_misses and query_remote_on_cache_miss:
for server_name, key_ids in cache_misses.items():
try:
yield self.keyring.get_server_verify_key_v2_direct(
server_name, key_ids
)
except:
logger.exception("Failed to get key for %r", server_name)
pass
yield self.query_keys(
request, query, query_remote_on_cache_miss=False
)
else:
result_io = BytesIO()
result_io.write(b"{\"server_keys\":")
sep = b"["
for json_bytes in json_results:
result_io.write(sep)
result_io.write(json_bytes)
sep = b","
if sep == b"[":
result_io.write(sep)
result_io.write(b"]}")
respond_with_json_bytes(
request, 200, result_io.getvalue(),
version_string=self.version_string
)
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Unit test for checking fields in mapping and unmapping."""
import unittest
from ddt import data, ddt, unpack
from ggrc import utils
class BaseTestMappingRules(unittest.TestCase):
"""Base TestCase for mapping and unmapping check."""
rules = {}
def assertRules(self, model, *rules): # pylint: disable=C0103
"""Assert to check rules for current model in mapping rules."""
self.assertIn(model, self.rules)
self.assertEqual(set(rules), self.rules[model])
@ddt
class TestMappingRules(BaseTestMappingRules):
"""Test case for mapping rules."""
rules = utils.get_mapping_rules()
all_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
assessment_rules = ['AccessGroup', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
audit_rules = ['AccessGroup', 'Assessment', 'Clause', 'Contract', 'Control',
'DataAsset', 'Facility', 'Issue', 'Market', 'Objective',
'OrgGroup', 'Person', 'Policy', 'Process', 'Product',
'Program', 'Project', 'Regulation', 'Section', 'Standard',
'System', 'Vendor', ]
accessgroup_rules = ['Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective',
'OrgGroup', 'Person', 'Policy', 'Process', 'Product',
'Program', 'Project', 'Regulation', 'Risk',
'Section', 'Standard', 'System', 'Threat', 'Vendor', ]
contract_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program', 'Project',
'Risk', 'Section', 'System', 'Threat', 'Vendor', ]
cycletaskgroupobjecttask_rules = ['AccessGroup', 'Assessment',
'Clause', 'Contract', 'Control',
'DataAsset', 'Facility', 'Issue', 'Market',
'Objective', 'OrgGroup', 'Person',
'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk',
'Section', 'Standard', 'System',
'Threat', 'Vendor', ]
clause_rules = ['AccessGroup', 'Assessment', 'Audit', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
person_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
policy_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program',
'Project', 'Risk', 'Section', 'System', 'Threat', 'Vendor', ]
program_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
regulation_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program',
'Project', 'Risk', 'Section',
'System', 'Threat', 'Vendor', ]
risk_rules = ['AccessGroup', 'Assessment', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
standard_rules = ['AccessGroup', 'Audit', 'Assessment', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program', 'Project',
'Risk', 'Section', 'System', 'Threat', 'Vendor', ]
threat_rules = ['AccessGroup', 'Assessment', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Vendor', ]
@data(("AccessGroup", accessgroup_rules),
("Assessment", assessment_rules),
("Audit", audit_rules),
("Clause", clause_rules),
("Contract", contract_rules),
("Control", all_rules),
("CycleTaskGroupObjectTask", cycletaskgroupobjecttask_rules),
("DataAsset", all_rules),
("Facility", all_rules),
("Issue", all_rules),
("Market", all_rules),
("Objective", all_rules),
("OrgGroup", all_rules),
("Person", person_rules),
("Policy", policy_rules),
("Process", all_rules),
("Product", all_rules),
("Program", program_rules),
("Project", all_rules),
("Regulation", regulation_rules),
("Risk", risk_rules),
("Section", all_rules),
("Standard", standard_rules),
("System", all_rules),
("Threat", threat_rules),
("Vendor", all_rules))
@unpack
def test_field(self, field, rules):
self.assertRules(field, *rules)
@ddt
class TestUnMappingRules(BaseTestMappingRules):
"""Test case for unmapping rules."""
rules = utils.get_unmapping_rules()
all_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
assessment_rules = ['AccessGroup', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
audit_rules = ['AccessGroup', 'Assessment', 'Clause', 'Contract', 'Control',
'DataAsset', 'Facility', 'Issue', 'Market', 'Objective',
'OrgGroup', 'Person', 'Policy', 'Process', 'Product',
'Program', 'Project', 'Regulation', 'Section', 'Standard',
'System', 'Vendor', ]
accessgroup_rules = ['Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective',
'OrgGroup', 'Person', 'Policy', 'Process', 'Product',
'Program', 'Project', 'Regulation', 'Risk',
'Section', 'Standard', 'System', 'Threat', 'Vendor', ]
contract_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program', 'Project',
'Risk', 'Section', 'System', 'Threat', 'Vendor', ]
cycletaskgroupobjecttask_rules = ['AccessGroup', 'Assessment',
'Clause', 'Contract', 'Control',
'DataAsset', 'Facility', 'Issue', 'Market',
'Objective', 'OrgGroup', 'Person',
'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk',
'Section', 'Standard', 'System',
'Threat', 'Vendor', ]
clause_rules = ['AccessGroup', 'Assessment', 'Audit', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
person_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
policy_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program',
'Project', 'Risk', 'Section', 'System', 'Threat', 'Vendor', ]
program_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
regulation_rules = ['AccessGroup', 'Assessment', 'Audit', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program',
'Project', 'Risk', 'Section',
'System', 'Threat', 'Vendor', ]
risk_rules = ['AccessGroup', 'Assessment', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Section', 'Standard',
'System', 'Threat', 'Vendor', ]
standard_rules = ['AccessGroup', 'Audit', 'Assessment', 'Clause',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Process', 'Product', 'Program', 'Project',
'Risk', 'Section', 'System', 'Threat', 'Vendor', ]
threat_rules = ['AccessGroup', 'Assessment', 'Clause', 'Contract',
'Control', 'CycleTaskGroupObjectTask', 'DataAsset',
'Facility', 'Issue', 'Market', 'Objective', 'OrgGroup',
'Person', 'Policy', 'Process', 'Product', 'Program',
'Project', 'Regulation', 'Risk', 'Section', 'Standard',
'System', 'Vendor', ]
@data(("AccessGroup", accessgroup_rules),
("Assessment", assessment_rules),
("Audit", audit_rules),
("Clause", clause_rules),
("Contract", contract_rules),
("Control", all_rules),
("CycleTaskGroupObjectTask", cycletaskgroupobjecttask_rules),
("DataAsset", all_rules),
("Facility", all_rules),
("Issue", all_rules),
("Market", all_rules),
("Objective", all_rules),
("OrgGroup", all_rules),
("Person", person_rules),
("Policy", policy_rules),
("Process", all_rules),
("Product", all_rules),
("Program", program_rules),
("Project", all_rules),
("Regulation", regulation_rules),
("Risk", risk_rules),
("Section", all_rules),
("Standard", standard_rules),
("System", all_rules),
("Threat", threat_rules),
("Vendor", all_rules))
@unpack
def test_field(self, field, rules):
self.assertRules(field, *rules)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Audio analysis
'''
import os
import pandas as pd
import numpy as np
import soundfile as sf
import librosa
from .feature import Feature, FeatureCollection
from .timing import TimingList
class Audio(object):
"""
The base Audio object: wraps the output from librosa, and provides access to features
Attributes
----------
sample_rate: number
sample rate
raw_samples: numpy array
raw samples from the audio
analysis_samples: numpy array
downsampled samples for analysis
num_channels: integer
number of channels of the audio
duration: float
duration, in seconds
features: dict
collection of named feature objects
"""
def __init__(
self,
file_path=None,
raw_samples=None,
convert_to_mono=False,
sample_rate=44100,
analysis_sample_rate=22050,
):
"""
Audio constructor.
Opens a file path, loads the audio with librosa, and prepares the features
Parameters
----------
file_path: string
path to the audio file to load
raw_samples: np.array
samples to use for audio output
convert_to_mono: boolean
(optional) converts the file to mono on loading
sample_rate: number > 0 [scalar]
(optional) sample rate to pass to librosa.
Returns
------
An Audio object
"""
if file_path:
y, sr = librosa.load(file_path, mono=convert_to_mono, sr=sample_rate)
elif raw_samples is not None:
# This assumes that we're passing in raw_samples
# directly from another Audio's raw_samples.
y = raw_samples
sr = sample_rate
self.file_path = file_path
self.sample_rate = float(sr)
self.analysis_sample_rate = float(analysis_sample_rate)
self.num_channels = y.ndim
self.duration = librosa.get_duration(y=y, sr=sr)
self.analysis_samples = librosa.resample(
librosa.to_mono(y), sr, self.analysis_sample_rate, res_type='kaiser_best'
)
self.raw_samples = np.atleast_2d(y)
self.zero_indexes = self._create_zero_indexes()
self.features = self._create_features()
self.timings = self._create_timings()
def __repr__(self):
file_name = os.path.split(self.file_path)[-1]
args = file_name, self.duration
return '<Audio, file: {0:s}, duration: {1:.2f}>'.format(*args)
def output(self, filename, format=None):
"""
Write the samples out to the given filename.
Parameters
----------
filename : str
The path to write the audio on disk.
This can be any format supported by `pysoundfile`, including
`WAV`, `FLAC`, or `OGG` (but not `mp3`).
format : str
If provided, explicitly set the output encoding format.
See `soundfile.available_formats`.
"""
sf.write(filename, self.raw_samples.T, int(self.sample_rate), format=format)
def _create_zero_indexes(self):
"""
Create zero crossing indexes.
We use these in synthesis, and it is easier to make them here.
"""
zero_indexes = []
for channel_index in range(self.num_channels):
channel = self.raw_samples[channel_index]
zero_crossings = librosa.zero_crossings(channel)
zero_index = np.nonzero(zero_crossings)[0]
zero_indexes.append(zero_index)
return zero_indexes
def _create_timings(self):
"""
Create timings in a timings dict.
"""
timings = {}
timings['track'] = TimingList('track', [(0, self.duration)], self)
timings['beats'] = TimingList('beats', self._get_beats(), self)
timings['segments'] = TimingList('segments', self._get_segments(), self)
return timings
def _get_beats(self):
"""
Gets beats using librosa's beat tracker.
"""
_, beat_frames = librosa.beat.beat_track(
y=self.analysis_samples, sr=self.analysis_sample_rate, trim=False
)
# pad beat times to full duration
f_max = librosa.time_to_frames(self.duration, sr=self.analysis_sample_rate)
beat_frames = librosa.util.fix_frames(beat_frames, x_min=0, x_max=f_max)
# convert frames to times
beat_times = librosa.frames_to_time(beat_frames, sr=self.analysis_sample_rate)
# make the list of (start, duration) tuples that TimingList expects
starts_durs = [(s, t - s) for (s, t) in zip(beat_times, beat_times[1:])]
return starts_durs
def _get_segments(self):
"""
Gets Echo Nest style segments using librosa's onset detection and backtracking.
"""
onset_frames = librosa.onset.onset_detect(
y=self.analysis_samples, sr=self.analysis_sample_rate, backtrack=True
)
segment_times = librosa.frames_to_time(
onset_frames, sr=self.analysis_sample_rate
)
# make the list of (start, duration) tuples that TimingList expects
starts_durs = [(s, t - s) for (s, t) in zip(segment_times, segment_times[1:])]
return starts_durs
def _create_features(self):
"""
Creates the FeatureCollection, and loads each feature.
Parameters
---------
Returns
-----
FeatureCollection
FeatureCollection with each Amen.Feature object named correctly.
Note that _get_chroma returns a FeatureCollection of chroma features.
"""
features = FeatureCollection()
features['centroid'] = self._get_centroid()
features['amplitude'] = self._get_amplitude()
features['timbre'] = self._get_timbre()
features['chroma'] = self._get_chroma()
features['tempo'] = self._get_tempo()
return features
def _get_centroid(self):
"""
Gets spectral centroid data from librosa, and returns it as a Feature
Parameters
---------
Returns
-----
Feature
"""
centroids = librosa.feature.spectral_centroid(self.analysis_samples)
data = self._convert_to_dataframe(centroids, ['spectral_centroid'])
feature = Feature(data)
return feature
def _get_amplitude(self):
"""
Gets amplitude data from librosa, and returns it as a Feature
Parameters
---------
Returns
-----
Feature
"""
amplitudes = librosa.feature.rms(self.analysis_samples)
data = self._convert_to_dataframe(amplitudes, ['amplitude'])
feature = Feature(data)
return feature
def _get_timbre(self):
"""
Gets timbre (MFCC) data, taking the first 12.
Note that the keys to the Feature are "mfcc_<index>",
to avoid having a dict-like object with numeric keys.
Parameters
---------
Returns
-----
Feature
"""
mfccs = librosa.feature.mfcc(
y=self.analysis_samples, sr=self.analysis_sample_rate, n_mfcc=12
)
feature = FeatureCollection()
for index, mfcc in enumerate(mfccs):
data = self._convert_to_dataframe(mfcc, ['timbre'])
key = 'mfcc_%s' % (index)
feature[key] = Feature(data)
return feature
def _get_chroma(self):
"""
Gets chroma data from librosa, and returns it as a FeatureCollection,
with 12 features.
Parameters
---------
Returns
-----
FeatureCollection
"""
feature = FeatureCollection()
pitch_names = ['c', 'c#', 'd', 'eb', 'e', 'f', 'f#', 'g', 'ab', 'a', 'bb', 'b']
chroma_cq = librosa.feature.chroma_cqt(self.analysis_samples)
for chroma, pitch in zip(chroma_cq, pitch_names):
data = self._convert_to_dataframe(chroma, [pitch])
feature[pitch] = Feature(data)
# Enharmonic aliases
feature['db'] = feature['c#']
feature['d#'] = feature['eb']
feature['gb'] = feature['f#']
feature['g#'] = feature['ab']
feature['a#'] = feature['bb']
return feature
def _get_tempo(self):
"""
Gets tempo data from librosa, and returns it as a feature collection.
Note that the tempo feature uses median aggregation, as opposed to the
default mean.
Parameters
---------
Returns
-----
FeatureCollection
"""
onset_env = librosa.onset.onset_strength(
self.analysis_samples, sr=self.analysis_sample_rate
)
tempo = librosa.beat.tempo(
onset_envelope=onset_env, sr=self.analysis_sample_rate, aggregate=None
)
data = self._convert_to_dataframe(tempo, ['tempo'])
feature = Feature(data, aggregate=np.median)
return feature
@classmethod
def _convert_to_dataframe(cls, feature_data, columns):
"""
Take raw librosa feature data, convert to a pandas dataframe.
Parameters
---------
feature_data: numpy array
a N by T array, where N is the number of features, and T is the number of time dimensions
columns: list [strings]
a list of column names of length N, the same as the N dimension of feature_data
Returns
-----
pandas.DataFrame
"""
feature_data = feature_data.transpose()
frame_numbers = np.arange(len(feature_data))
indexes = librosa.frames_to_time(frame_numbers)
indexes = pd.to_timedelta(indexes, unit='s')
data = pd.DataFrame(data=feature_data, index=indexes, columns=columns)
return data
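# Illustrative usage of the Audio class above (a minimal sketch; 'song.ogg' is
# a placeholder path):
#
#   audio = Audio('song.ogg', convert_to_mono=True)
#   audio.timings['beats']      # TimingList of detected beats
#   audio.features['tempo']     # Feature aggregated with np.median
#   audio.output('copy.wav')    # write the raw samples back to disk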
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_validate_keep_input = input_py._validate_keep_input
_shapes = input_py._shapes
_smart_cond = input_py._smart_cond
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in bucket().")
return tensor_list
def bucket(tensors,
which_bucket,
batch_size,
num_buckets,
num_threads=1,
capacity=32,
bucket_capacities=None,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=True,
shared_name=None,
name=None):
"""Lazy bucketing of input tensors according to `which_bucket`.
The argument `tensors` can be a list or a dictionary of tensors.
The value returned by the function will be of the same type
as `tensors`.
The tensors entering this function are put into the bucket given by
`which_bucket`. Each bucket has its own queue. When a bucket contains
`batch_size` elements, this minibatch is pushed onto a top queue. The
tensors returned from this function are the result of dequeueing the
next minibatch from this top queue.
This function is implemented using several queues. A `QueueRunner` for the
queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.
As the returned tensors are the result of a dequeue operation, evaluating
them will throw a `tf.errors.OutOfRangeError` when the input queue is
exhausted. If these tensors are feeding another input queue, its queue runner
will catch this exception, however, if they are used in your main thread
you are responsible for catching this yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
`tensors` must have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
tensors is known, but individual dimensions may have shape `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queues are closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape()` method will have a 0th `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
num_buckets: A python integer, the number of buckets.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also (by default) the maximum number of elements within each bucket.
bucket_capacities: (Optional) None or a list of integers, the capacities of
each bucket. If None, capacity is used (default). If specified, it must
be a list of integers of length num_buckets: the i-th element is used
as capacity for the i-th bucket queue.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: A `bool` scalar Tensor. If provided, this tensor controls
whether the input is added to the queue or not. If it evaluates `True`,
then `tensors` are added to the bucket; otherwise they are dropped. This
tensor essentially acts as a filtering mechanism.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(bucket, outputs)` where `bucket` is
a `int32` scalar tensor and `outputs` is a list or
dictionary of batched outputs corresponding to elements of `tensors`.
Every step will receive a new bucket of outputs.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors` or if batch_size is a sequence
but its length != num_buckets. Also if bucket_capacities is not None but
its length != num_buckets.
"""
batch_size_per_bucket = False
if isinstance(batch_size, (list, tuple)):
batch_size_per_bucket = True
if len(batch_size) != num_buckets:
raise ValueError(
"If batch_size is a list it must have num_buckets elements")
else:
batch_size = [batch_size] * num_buckets
if bucket_capacities is None:
bucket_capacities = [capacity] * num_buckets
if len(bucket_capacities) != num_buckets:
raise ValueError(
"The list bucket_capacities (%s) must have exactly num_buckets (%d) "
"elements." % (str(bucket_capacities), num_buckets))
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "bucket", tensor_list) as name:
tensor_list = _validate_bucket(tensor_list)
keep_input = _validate_keep_input(keep_input, enqueue_many=False)
(tensor_list, sparse_info) = _store_sparse_tensors(
tensor_list, enqueue_many=False, keep_input=keep_input)
# Round-trip batch_size to a tensor, and possibly back
for i, bucket_batch_size in enumerate(batch_size):
bucket_batch_size = ops.convert_to_tensor(
bucket_batch_size, dtype=dtypes.int32, name="batch_size")
static_batch_size = tensor_util.constant_value(bucket_batch_size)
batch_size[i] = (static_batch_size if static_batch_size is not None else
bucket_batch_size)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many=False)
which_bucket = ops.convert_to_tensor(
which_bucket, dtype=dtypes.int32, name="which_bucket")
queue_creator = _which_queue(dynamic_pad)
bucket_queues = []
for i in range(num_buckets):
shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
else None)
bucket_queues.append(
queue_creator(
capacity=bucket_capacities[i],
dtypes=types,
shapes=shapes,
shared_name=shared_name_i,
name="bucket_queue_%d" % i))
maybe_static_batch_size = (
None if (allow_smaller_final_batch or batch_size_per_bucket)
else static_batch_size)
bucket_shapes = [
tensor_shape.vector(maybe_static_batch_size).concatenate(s)
for s in bucket_queues[0].shapes
]
# top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
# queues because if we use allow_smaller_final_batch, shapes will
# contain Nones in their first entry; as a result, a regular
# FIFOQueue would die when being passed shapes that are not fully defined.
top_queue = data_flow_ops.PaddingFIFOQueue(
capacity=capacity,
dtypes=[dtypes.int32] + types,
shapes=[tensor_shape.scalar()] + bucket_shapes,
shared_name=shared_name,
name="top_queue")
def enqueue_which():
"""Return an op that enqueues conditionally in one of the queues."""
def enqueue_single(i):
return bucket_queues[i].enqueue(tensor_list)
enqueues = [
control_flow_ops.cond(
math_ops.equal(which_bucket, i),
functools.partial(enqueue_single, i), control_flow_ops.no_op)
for i in range(num_buckets)
]
return control_flow_ops.group(*enqueues, name="group_enqueues")
maybe_enqueue = _smart_cond(
keep_input,
enqueue_which,
control_flow_ops.no_op)
bucket_enqueue_ops = [maybe_enqueue] * num_threads
if allow_smaller_final_batch:
which_dequeue = lambda q: q.dequeue_up_to
else:
which_dequeue = lambda q: q.dequeue_many
enqueues_to_top = [
top_queue.enqueue(
[constant_op.constant(i)] + which_dequeue(q)(
bs, name="read_bucket_%d" % i),
name="enqueue_from_bucket_%d" % i)
for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
]
for i, q in enumerate(bucket_queues):
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
q, [enqueues_to_top[i]],
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
top_queue,
bucket_enqueue_ops,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
for q in bucket_queues:
summary.scalar("bucket/%s/size" % q.name,
math_ops.cast(top_queue.size(), dtypes.float32))
summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
math_ops.cast(top_queue.size(), dtypes.float32) *
(1. / capacity))
dequeued = top_queue.dequeue(name="dequeue_top")
which_bucket_dequeued = dequeued[0]
dequeued = dequeued[1:]
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
def bucket_by_sequence_length(input_length,
tensors,
batch_size,
bucket_boundaries,
num_threads=1,
capacity=32,
bucket_capacities=None,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=True,
shared_name=None,
name=None):
"""Lazy bucketing of inputs according to their length.
This method calls `tf.contrib.training.bucket` under the hood, after first
subdividing the bucket boundaries into separate buckets and identifying which
bucket the given `input_length` belongs to. See the documentation for
`which_bucket` for details of the other arguments.
Args:
input_length: `int32` scalar `Tensor`, the sequence length of tensors.
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
bucket_boundaries: int list, increasing non-negative numbers.
The edges of the buckets to use when bucketing tensors. Two extra buckets
are created, one for `input_length < bucket_boundaries[0]` and
one for `input_length >= bucket_boundaries[-1]`.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
bucket_capacities: (Optional) None or a list of integers, the capacities of
each bucket. If None, capacity is used (default). If specified, it must
be a list of integers of length one larger than bucket_boundaries.
Its i-th element is used as capacity for the i-th bucket queue.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: A `bool` scalar Tensor. If provided, this tensor controls
whether the input is added to the queue or not. If it evaluates `True`,
then `tensors` are added to the bucket; otherwise they are dropped. This
tensor essentially acts as a filtering mechanism.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(sequence_length, outputs)` where `sequence_length` is
a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
of batched, bucketed, outputs corresponding to elements of `tensors`.
Raises:
TypeError: if `bucket_boundaries` is not a list of python integers.
ValueError: if `bucket_boundaries` is empty or contains non-increasing
values or if batch_size is a list and its length doesn't equal the number
of buckets.
"""
tensor_list = _as_tensor_list(tensors)
if not isinstance(bucket_boundaries, (list, tuple)):
raise TypeError(
"bucket_boundaries must be a list or tuple, but received: %s" %
bucket_boundaries)
if not bucket_boundaries:
raise ValueError("bucket_boundaries must not be empty")
for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
if not isinstance(s, int) or not isinstance(e, int):
raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
(s, e))
if s >= e:
raise ValueError(
"Buckets must contain sequential increasing lengths, but saw: "
"%d before %d" % (s, e))
with ops.name_scope(name, "bucket_by_sequence_length",
[input_length] + tensor_list) as name:
input_length = ops.convert_to_tensor(
input_length, dtype=dtypes.int32, name="input_length")
# Bucketing conditions are:
# l < b[0]
# b[0] <= l < b[1]
# b[1] <= l < b[2]
# ...
# b[N-2] <= l < b[N-1]
# b[N-1] <= l
# Equivalent to:
# [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
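# Worked example (illustrative): with bucket_boundaries=[3, 5] there are
# three buckets covering [0, 3), [3, 5) and [5, inf). For input_length=4:
#   buckets_min = [INT32_MIN, 3, 5], buckets_max = [3, 5, INT32_MAX]
#   conditions_c -> [False, True, False], so which_bucket == 1.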
buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, input_length),
math_ops.less(input_length, buckets_max))
which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
which_bucket = math_ops.to_int32(which_bucket)
if shapes is not None:
shapes = [tensor_shape.scalar()] + shapes
_, dequeued = bucket(
tensors=[input_length] + tensor_list,
which_bucket=which_bucket,
batch_size=batch_size,
num_buckets=len(bucket_boundaries) + 1,
num_threads=num_threads,
capacity=capacity,
bucket_capacities=bucket_capacities,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
keep_input=keep_input,
shared_name=shared_name)
return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
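# Minimal usage sketch of bucket_by_sequence_length (illustrative; the tensor
# names are assumptions):
#
#   seq_len, (lengths, padded_tokens) = bucket_by_sequence_length(
#       input_length=length,               # int32 scalar Tensor for one element
#       tensors=[length, token_ids],       # a single, unbatched element
#       batch_size=32,
#       bucket_boundaries=[10, 20, 40],    # 4 buckets: <10, [10,20), [20,40), >=40
#       dynamic_pad=True,                  # pad token_ids to the longest in the batch
#       allow_smaller_final_batch=True)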
__all__ = ["bucket", "bucket_by_sequence_length"]
import py
from rpython.rlib.jit import JitDriver
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.metainterp.blackhole import BlackholeInterpBuilder
from rpython.jit.metainterp.blackhole import BlackholeInterpreter
from rpython.jit.metainterp.blackhole import convert_and_run_from_pyjitpl
from rpython.jit.metainterp import history, pyjitpl, jitexc, resoperation
from rpython.jit.codewriter.assembler import JitCode
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.llinterp import LLException
class FakeCodeWriter:
pass
class FakeAssembler:
pass
class FakeCPU:
def bh_call_i(self, func, args_i, args_r, args_f, calldescr):
assert func == 321
assert calldescr == "<calldescr>"
if args_i[0] < 0:
raise LLException("etype", "evalue")
return args_i[0] * 2
def getblackholeinterp(insns, descrs=[]):
cw = FakeCodeWriter()
cw.cpu = FakeCPU()
cw.assembler = FakeAssembler()
cw.assembler.insns = insns
cw.assembler.descrs = descrs
builder = BlackholeInterpBuilder(cw)
return builder.acquire_interp()
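# In the `insns` dicts below, keys are "opname/argtypes>result" signatures
# (e.g. 'int_add/ii>i': two int arguments producing an int) and values are
# the opcode numbers that the jitcode byte strings refer to.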
def test_simple():
jitcode = JitCode("test")
jitcode.setup("\x00\x00\x01\x02"
"\x01\x02",
[])
blackholeinterp = getblackholeinterp({'int_add/ii>i': 0,
'int_return/i': 1})
blackholeinterp.setposition(jitcode, 0)
blackholeinterp.setarg_i(0, 40)
blackholeinterp.setarg_i(1, 2)
blackholeinterp.run()
assert blackholeinterp._final_result_anytype() == 42
def test_simple_const():
jitcode = JitCode("test")
jitcode.setup("\x00\x30\x01\x02"
"\x01\x02",
[])
blackholeinterp = getblackholeinterp({'int_sub/ci>i': 0,
'int_return/i': 1})
blackholeinterp.setposition(jitcode, 0)
blackholeinterp.setarg_i(1, 6)
blackholeinterp.run()
assert blackholeinterp._final_result_anytype() == 42
def test_simple_bigconst():
jitcode = JitCode("test")
jitcode.setup("\x00\xFD\x01\x02"
"\x01\x02",
[666, 666, 10042, 666])
blackholeinterp = getblackholeinterp({'int_sub/ii>i': 0,
'int_return/i': 1})
blackholeinterp.setposition(jitcode, 0)
blackholeinterp.setarg_i(1, 10000)
blackholeinterp.run()
assert blackholeinterp._final_result_anytype() == 42
def test_simple_loop():
jitcode = JitCode("test")
jitcode.setup("\x00\x16\x02\x10\x00" # L1: goto_if_not_int_gt %i0, 2, L2
"\x01\x17\x16\x17" # int_add %i1, %i0, %i1
"\x02\x16\x01\x16" # int_sub %i0, $1, %i0
"\x03\x00\x00" # goto L1
"\x04\x17", # L2: int_return %i1
[])
blackholeinterp = getblackholeinterp({'goto_if_not_int_gt/icL': 0,
'int_add/ii>i': 1,
'int_sub/ic>i': 2,
'goto/L': 3,
'int_return/i': 4})
blackholeinterp.setposition(jitcode, 0)
blackholeinterp.setarg_i(0x16, 6) # %i0
blackholeinterp.setarg_i(0x17, 100) # %i1
blackholeinterp.run()
assert blackholeinterp._final_result_anytype() == 100+6+5+4+3
def test_simple_exception():
jitcode = JitCode("test")
jitcode.setup( # residual_call_ir_i $<* fn g>, I[%i9], R[], <Descr> %i8
"\x01\xFF\x01\x09\x00\x00\x00\x08"
"\x00\x0D\x00" # catch_exception L1
"\x02\x08" # int_return %i8
"\x03\x2A", # L1: int_return $42
[321]) # <-- address of the function g
blackholeinterp = getblackholeinterp({'catch_exception/L': 0,
'residual_call_ir_i/iIRd>i': 1,
'int_return/i': 2,
'int_return/c': 3},
["<calldescr>"])
#
blackholeinterp.setposition(jitcode, 0)
blackholeinterp.setarg_i(0x9, 100)
blackholeinterp.run()
assert blackholeinterp._final_result_anytype() == 200
#
blackholeinterp.setposition(jitcode, 0)
blackholeinterp.setarg_i(0x9, -100)
blackholeinterp.run()
assert blackholeinterp._final_result_anytype() == 42
def test_convert_and_run_from_pyjitpl():
class MyMIFrame:
jitcode = JitCode("test")
jitcode.setup("\xFF" # illegal instruction
"\x00\x00\x01\x02" # int_add/ii>i
"\x01\x02", # int_return/i
[],
num_regs_i=3, num_regs_r=0, num_regs_f=0)
jitcode.jitdriver_sd = "foo" # not none
pc = 1
registers_i = [resoperation.InputArgInt(40), history.ConstInt(2), None]
class MyMetaInterp:
class staticdata:
result_type = 'int'
class profiler:
@staticmethod
def start_blackhole(): pass
@staticmethod
def end_blackhole(): pass
last_exc_value = None
framestack = [MyMIFrame()]
MyMetaInterp.staticdata.blackholeinterpbuilder = getblackholeinterp(
{'int_add/ii>i': 0, 'int_return/i': 1}).builder
MyMetaInterp.staticdata.blackholeinterpbuilder.metainterp_sd = \
MyMetaInterp.staticdata
#
d = py.test.raises(jitexc.DoneWithThisFrameInt,
convert_and_run_from_pyjitpl, MyMetaInterp())
assert d.value.result == 42
class TestBlackhole(LLJitMixin):
def test_blackholeinterp_cache_basic(self):
class FakeJitcode:
def num_regs_r(self):
return 0
interp1 = getblackholeinterp({})
interp1.jitcode = FakeJitcode()
builder = interp1.builder
interp2 = builder.acquire_interp()
builder.release_interp(interp1)
interp3 = builder.acquire_interp()
assert builder.num_interpreters == 2
def test_blackholeinterp_cache_normal(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y'])
def choices(x):
if x == 0: # <- this is the test that eventually succeeds,
return 0 # requiring a blackhole interp in a call stack
return 34871 # of two functions (hence num_interpreters==2)
def f(x):
y = 0
cont = 1
while cont:
myjitdriver.can_enter_jit(x=x, y=y)
myjitdriver.jit_merge_point(x=x, y=y)
cont = choices(x)
y += cont
x -= 1
return y
#
seen = []
def my_copy_constants(self, *args):
seen.append(1)
return org_copy_constants(self, *args)
org_copy_constants = BlackholeInterpreter.copy_constants
BlackholeInterpreter.copy_constants = my_copy_constants
try:
res = self.meta_interp(f, [7], repeat=7)
finally:
BlackholeInterpreter.copy_constants = org_copy_constants
#
assert res == sum([choices(x) for x in range(1, 8)])
builder = pyjitpl._warmrunnerdesc.metainterp_sd.blackholeinterpbuilder
assert builder.num_interpreters == 2
assert len(seen) == 2 * 3
def test_blackholeinterp_cache_exc(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y'])
class FooError(Exception):
def __init__(self, num):
self.num = num
def choices(x):
if x == 0:
raise FooError(0)
raise FooError(34871)
def f(x):
y = 0
while True:
myjitdriver.can_enter_jit(x=x, y=y)
myjitdriver.jit_merge_point(x=x, y=y)
try:
choices(x)
except FooError, e:
if e.num == 0:
break
y += e.num
x -= 1
return y
res = self.meta_interp(f, [7], repeat=7)
assert res == sum([py.test.raises(FooError, choices, x).value.num
for x in range(1, 8)])
builder = pyjitpl._warmrunnerdesc.metainterp_sd.blackholeinterpbuilder
assert builder.num_interpreters == 2
def test_bad_shift():
py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, 100)
py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, 100)
py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, 100)
py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, -1)
py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, -1)
py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, -1)
assert BlackholeInterpreter.bhimpl_int_lshift.im_func(100, 3) == 100<<3
assert BlackholeInterpreter.bhimpl_int_rshift.im_func(100, 3) == 100>>3
assert BlackholeInterpreter.bhimpl_uint_rshift.im_func(100, 3) == 100>>3
def test_debug_fatalerror():
from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
from rpython.rtyper.llinterp import LLFatalError
msg = rstr.mallocstr(1)
msg.chars[0] = "!"
msg = lltype.cast_opaque_ptr(llmemory.GCREF, msg)
e = py.test.raises(LLFatalError,
BlackholeInterpreter.bhimpl_debug_fatalerror.im_func,
msg)
assert str(e.value) == '!'
|
|
import os
import requests
import logging
import sys
import zipfile
import tempfile
import shutil
import base64
import re
from urllib.parse import urlparse
# Make sure the current directory is in the
# path so that we can run this from anywhere.
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir)
from BaseSandboxParser import *
class CuckooParser(BaseSandboxParser):
def __init__(self, config, json_report_path, screenshot=True, whitelister=None):
# Run the super init to inherit attributes and load the config.
super().__init__(config, json_report_path, whitelister=whitelister)
# Try and load this report from cache.
if not self.load_from_cache():
self.logger.info("Parsing Cuckoo sandbox report: " + json_report_path)
            # Read some items from the config file.
self.base_url = self.config["CuckooParser"]["base_url"]
self.sandbox_display_name = self.config["CuckooParser"]["sandbox_display_name"]
self.report_directory = os.path.dirname(json_report_path)
# Fail if we can't parse the MD5. This is used as a sanity check when
# figuring out which of the sandbox parsers you should use on your JSON.
self.md5 = self.parse(self.report, "target", "file", "md5")
if not self.md5:
self.logger.critical("Unable to parse Cuckoo MD5 from: " + str(json_report_path))
return None
# Parse some basic info directly from the report.
self.sandbox_vm_name = self.parse(self.report, "info", "machine", "name")
self.filename = self.parse(self.report, "target", "file", "name")
self.sha1 = self.parse(self.report, "target", "file", "sha1")
self.sha256 = self.parse(self.report, "target", "file", "sha256")
self.sha512 = self.parse(self.report, "target", "file", "sha512")
self.ssdeep = self.parse(self.report, "target", "file", "ssdeep")
self.malware_family = self.parse(self.report, "malfamily")
self.sample_id = str(self.parse(self.report, "info", "id"))
# The rest of the info requires a bit more parsing.
self.sandbox_url = self.parse_sandbox_url()
if screenshot:
self.screenshot_path = self.download_screenshot()
self.contacted_hosts = self.parse_contacted_hosts()
self.dropped_files = self.parse_dropped_files()
self.http_requests = self.parse_http_requests()
self.dns_requests = self.parse_dns_requests()
self.process_tree = self.parse_process_tree()
self.decoded_process_tree = self.decode_process_tree()
self.process_tree_urls = self.parse_process_tree_urls()
self.mutexes = self.parse_mutexes()
self.resolved_apis = self.parse_resolved_apis()
self.created_services = self.parse_created_services()
self.started_services = self.parse_started_services()
self.strings = self.parse_strings()
#self.strings_urls = self.parse_strings_urls()
self.strings_urls = []
#self.json_urls = self.parse_json_urls()
self.all_urls = self.get_all_urls()
# Extract the IOCs.
self.extract_indicators()
# Get rid of the JSON report to save space.
self.report = None
# Cache the report.
self.save_to_cache()
# Override __get/setstate__ in case someone
# wants to pickle an object of this class.
def __getstate__(self):
d = dict(self.__dict__)
if "logger" in d:
del d["logger"]
if "strings" in d:
del d["strings"]
if "whitelister" in d:
del d["whitelister"]
return d
def __setstate__(self, d):
self.__dict__.update(d)
def parse_sandbox_url(self):
return self.base_url + "/analysis/" + self.sample_id + "/"
def download_screenshot(self):
if self.screenshot_repository:
screenshot_zip_path = os.path.join(self.screenshot_repository, self.md5 + "_cuckoo.zip")
screenshot_path = os.path.join(self.screenshot_repository, self.md5 + "_cuckoo.jpg")
# If the screenshot .jpg hasn't already been cached...
if not os.path.exists(screenshot_path):
# If the screenshot .zip hasn't already been cached...
if not os.path.exists(screenshot_zip_path):
# This URL will download the .zip of all the screenshots.
url = self.parse_screenshot_url()
if url:
try:
request = requests.get(url, allow_redirects=True, verify=self.requests_verify)
self.logger.debug("Downloading screenshots .zip " + url)
if request.status_code == 200:
with open(screenshot_zip_path, "wb") as url_file:
url_file.write(request.content)
except requests.exceptions.ConnectionError:
return None
                # Bail out if the .zip could not be downloaded.
                if not os.path.exists(screenshot_zip_path):
                    return None
                # The .zip is cached, but the screenshot is not. Extract the .zip
                # to get at the screenshots. Extract them to a temp dir and pick
                # the "best" screenshot from there to cache.
with tempfile.TemporaryDirectory() as temp_dir:
with zipfile.ZipFile(screenshot_zip_path, "r") as zf:
zf.extractall(temp_dir)
# Our VMs use a plain black Desktop background, so the logic
# is that the largest filesize of the screenshots is going
# to have the most "stuff" on it, so we'll pick that one.
best_screenshot = {"path": "", "size": 0}
for temp_screenshot in os.listdir(temp_dir):
temp_screenshot_path = os.path.join(temp_dir, temp_screenshot)
temp_screenshot_size = int(os.path.getsize(temp_screenshot_path))
if temp_screenshot_size > best_screenshot["size"]:
best_screenshot["path"] = temp_screenshot_path
best_screenshot["size"] = int(temp_screenshot_size)
# If we have a best screenshot, copy it out of the temp
# directory into the screenshot cache.
if best_screenshot["path"]:
self.logger.debug("Copying screenshot from temp dir to cache: {}".format(screenshot_path))
shutil.copy2(best_screenshot["path"], screenshot_path)
return screenshot_path
else:
return screenshot_path
return None
def parse_screenshot_url(self):
return self.base_url + "/api/tasks/screenshots/" + str(self.sample_id)
def parse_http_requests(self):
self.logger.debug("Parsing HTTP requests")
http_requests = []
http_requests_json = self.parse(self.report, "network", "http")
if http_requests_json:
for request in http_requests_json:
r = HttpRequest()
try:
full_url = request["path"]
parsed_url = urlparse(full_url)
r.host = parsed_url.netloc
r.port = parsed_url.port
r.uri = parsed_url.path
                except Exception:
                    pass
#try: r.host = request["host"]
#except KeyError: pass
#try: r.port = request["port"]
#except KeyError: pass
#try: r.uri = request["path"]
#except KeyError: pass
try: r.method = request["method"]
except KeyError: pass
try: r.user_agent = request["user-agent"]
except KeyError: pass
# Only add the request if the host was successfully parsed.
if r.host:
http_requests.append(r)
return http_requests
def parse_dns_requests(self):
self.logger.debug("Parsing DNS requests")
dns_requests = []
dns_requests_json = self.parse(self.report, "network", "dns")
if dns_requests_json:
for request in dns_requests_json:
r = DnsRequest()
try: r.request = request["request"]
except KeyError: pass
try: r.type = request["type"]
except KeyError: pass
# Technically, the Cuckoo JSON can have multiple answers listed,
# but we are only going to grab the first one, as most of the time
# there is only a single answer anyway.
try: r.answer = request["answers"][0]["data"]
except IndexError: pass
except KeyError: pass
try: r.answer_type = request["answers"][0]["type"]
except IndexError: pass
except KeyError: pass
# Only add the request if the host was successfully parsed.
if r.request:
dns_requests.append(r)
return dns_requests
def parse_dropped_files(self):
self.logger.debug("Parsing dropped files")
dropped_files = []
dropped_files_json = self.parse(self.report, "dropped")
if dropped_files_json:
for file in dropped_files_json:
f = DroppedFile()
try: f.filename = file["name"]
except KeyError: pass
try: f.path = file["guest_paths"][0]
except KeyError: pass
try: f.size = file["size"]
except KeyError: pass
try: f.type = file["type"]
except KeyError: pass
try: f.md5 = file["md5"]
except KeyError: pass
try: f.sha1 = file["sha1"]
except KeyError: pass
try:
f.sha256 = file["sha256"]
potential_path = os.path.join(self.report_directory, "dropped", f.sha256)
if os.path.exists(potential_path):
f.os_path = potential_path
except KeyError: pass
try: f.sha512 = file["sha512"]
except KeyError: pass
try: f.ssdeep = file["ssdeep"]
except KeyError: pass
                # Only add the file if its filename was successfully parsed.
if f.filename:
self.logger.debug("Adding dropped file: " + f.filename)
dropped_files.append(f)
else:
self.logger.error("Unable to parse dropped filename: " + f.md5)
return dropped_files
def parse_contacted_hosts(self):
self.logger.debug("Parsing contacted hosts")
contacted_hosts = []
contacted_hosts_json = self.parse(self.report, "network", "hosts")
if contacted_hosts_json:
for host in contacted_hosts_json:
h = ContactedHost()
h.ipv4 = host
"""
try: h.ipv4 = host["ip"]
except KeyError: pass
try: h.location = host["country_name"]
except KeyError: pass
try:
if host["hostname"]:
h.add_associated_domain(host["hostname"])
except KeyError: pass
"""
            # Only add the host if its IP was successfully parsed.
if h.ipv4:
contacted_hosts.append(h)
return contacted_hosts
def parse_process_tree_urls(self):
self.logger.debug("Looking for URLs in process tree")
urls = RegexHelpers.find_urls(str(self.parse_process_tree()))
urls += RegexHelpers.find_urls(self.decode_process_tree())
return urls
def parse_process_tree(self):
self.logger.debug("Parsing process tree")
def walk_tree(process_json=None, process_list=None):
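            # Recursively flatten Cuckoo's nested "processtree" JSON into a
            # flat ProcessList, preserving pid/ppid relationships.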
if not process_list:
process_list = ProcessList()
for process in process_json:
#command = process["environ"]["CommandLine"]
command = process["command_line"]
pid = process["pid"]
parent_pid = process["ppid"]
new_process = Process(command, pid, parent_pid)
process_list.add_process(new_process)
process_list = walk_tree(process["children"], process_list)
return process_list
return walk_tree(process_json=self.parse(self.report, "behavior", "processtree"))
def decode_process_tree(self):
process_tree = str(self.parse_process_tree())
decoded_process_tree = process_tree
# Try to decode base64 chunks.
for chunk in process_tree.split():
try:
decoded_chunk = base64.b64decode(chunk).decode('utf-8')
if '\x00' in decoded_chunk:
decoded_chunk = base64.b64decode(chunk).decode('utf-16')
decoded_process_tree = decoded_process_tree.replace(chunk, decoded_chunk)
            except Exception:
                pass
# Try to decode int arrays.
try:
int_array_regex = re.compile(r"\(((\s*\d+\s*,?)+)\)")
matches = int_array_regex.findall(decoded_process_tree)
for match in matches:
orig_int_array = match[0]
clean_int_array = ''.join(orig_int_array.split()).split(',')
chars = ''.join([chr(int(num)) for num in clean_int_array])
decoded_process_tree = decoded_process_tree.replace(orig_int_array, chars)
        except Exception:
            pass
# Try to decode split+int arrays.
try:
split_int_array_regex = re.compile(r"\(\s*'((\s*\d+.)+)\s*'")
matches = split_int_array_regex.findall(decoded_process_tree)
for match in matches:
orig_int_array = match[0]
int_regex = re.compile(r"\d+")
int_array = int_regex.findall(orig_int_array)
chars = ''.join([chr(int(num)) for num in int_array])
decoded_process_tree = decoded_process_tree.replace(orig_int_array, chars)
        except Exception:
            pass
if decoded_process_tree != process_tree:
return decoded_process_tree
else:
return ''
def parse_mutexes(self):
self.logger.debug("Parsing mutexes")
mutexes = set()
mutexes_json = self.parse(self.report, "behavior", "summary", "mutexes")
if mutexes_json:
for mutex in mutexes_json:
mutexes.add(mutex)
return sorted(list(mutexes))
def parse_resolved_apis(self):
self.logger.debug("Parsing resolved APIs")
resolved_apis = set()
resolved_apis_json = self.parse(self.report, "behavior", "summary", "resolved_apis")
if resolved_apis_json:
for api_call in resolved_apis_json:
resolved_apis.add(api_call)
return sorted(list(resolved_apis))
def parse_created_services(self):
self.logger.debug("Parsing created services")
created_services = set()
created_services_json = self.parse(self.report, "behavior", "summary", "created_services")
if created_services_json:
for service in created_services_json:
created_services.add(service)
return sorted(list(created_services))
def parse_started_services(self):
self.logger.debug("Parsing started services")
started_services = set()
started_services_json = self.parse(self.report, "behavior", "summary", "started_services")
if started_services_json:
for service in started_services_json:
started_services.add(service)
return sorted(list(started_services))
def parse_strings_urls(self):
self.logger.debug("Looking for URLs in strings")
return RegexHelpers.find_urls(self.parse_strings())
def parse_strings(self):
self.logger.debug("Parsing strings")
        strings_json = self.parse(self.report, "strings")
        if not strings_json:
            return ""
        return "\n".join(strings_json)
|
|
import json
from contextlib import contextmanager
from pathlib import Path
from unittest import mock
import pytest
from django_dynamic_fixture import get
from readthedocs.builds.storage import BuildMediaFileSystemStorage
from readthedocs.projects.constants import MKDOCS, SPHINX
from readthedocs.projects.models import HTMLFile, Project, Feature
data_path = Path(__file__).parent.resolve() / 'data'
@pytest.mark.django_db
@pytest.mark.search
class TestParsers:
def setup_method(self):
self.feature = get(
Feature,
feature_id=Feature.INDEX_FROM_HTML_FILES,
)
self.project = get(
Project,
slug='test',
main_language_project=None,
)
self.version = self.project.versions.first()
def _mock_open(self, content):
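        # Build a stand-in for storage.open(): a context-manager factory whose
        # read() returns the canned content each test passes in.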
@contextmanager
def f(*args, **kwargs):
read_mock = mock.MagicMock()
read_mock.read.return_value = content
yield read_mock
return f
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs(self, storage_open, storage_exists):
json_file = data_path / 'mkdocs/in/search_index.json'
storage_open.side_effect = self._mock_open(
json_file.open().read()
)
storage_exists.return_value = True
self.version.documentation_type = MKDOCS
self.version.save()
index_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='index.html',
)
versions_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='versions/index.html',
)
no_title_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='no-title/index.html',
)
parsed_json = [
index_file.processed_json,
versions_file.processed_json,
no_title_file.processed_json,
]
expected_json = json.load(open(data_path / 'mkdocs/out/search_index.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs_default_theme(self, storage_open, storage_exists):
local_path = data_path / 'mkdocs/in/mkdocs-1.1/'
storage_exists.return_value = True
self.project.feature_set.add(self.feature)
self.version.documentation_type = MKDOCS
self.version.save()
parsed_json = []
all_files = [
'index.html',
'404.html',
'configuration.html',
'no-title.html',
'no-main-header.html',
]
for file_name in all_files:
file = local_path / file_name
storage_open.reset_mock()
storage_open.side_effect = self._mock_open(file.open().read())
file = get(
HTMLFile,
project=self.project,
version=self.version,
path=file_name,
)
parsed_json.append(file.processed_json)
expected_json = json.load(open(data_path / 'mkdocs/out/mkdocs-1.1.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs_gitbook_theme(self, storage_open, storage_exists):
file = data_path / 'mkdocs/in/gitbook/index.html'
storage_exists.return_value = True
self.project.feature_set.add(self.feature)
self.version.documentation_type = MKDOCS
self.version.save()
storage_open.side_effect = self._mock_open(file.open().read())
file = get(
HTMLFile,
project=self.project,
version=self.version,
path='index.html',
)
parsed_json = [file.processed_json]
expected_json = json.load(open(data_path / 'mkdocs/out/gitbook.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs_material_theme(self, storage_open, storage_exists):
file = data_path / 'mkdocs/in/material/index.html'
storage_exists.return_value = True
self.project.feature_set.add(self.feature)
self.version.documentation_type = MKDOCS
self.version.save()
storage_open.side_effect = self._mock_open(file.open().read())
file = get(
HTMLFile,
project=self.project,
version=self.version,
path='index.html',
)
parsed_json = [file.processed_json]
expected_json = json.load(open(data_path / 'mkdocs/out/material.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs_windmill_theme(self, storage_open, storage_exists):
file = data_path / 'mkdocs/in/windmill/index.html'
storage_exists.return_value = True
self.project.feature_set.add(self.feature)
self.version.documentation_type = MKDOCS
self.version.save()
storage_open.side_effect = self._mock_open(file.open().read())
file = get(
HTMLFile,
project=self.project,
version=self.version,
path='index.html',
)
parsed_json = [file.processed_json]
expected_json = json.load(open(data_path / 'mkdocs/out/windmill.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs_readthedocs_theme(self, storage_open, storage_exists):
self.project.feature_set.add(self.feature)
storage_exists.return_value = True
self.version.documentation_type = MKDOCS
self.version.save()
local_path = data_path / 'mkdocs/in/readthedocs-1.1/'
parsed_json = []
for file_name in ['index.html', '404.html', 'versions.html']:
file = local_path / file_name
storage_open.reset_mock()
storage_open.side_effect = self._mock_open(file.open().read())
file = get(
HTMLFile,
project=self.project,
version=self.version,
path=file_name,
)
parsed_json.append(file.processed_json)
expected_json = json.load(open(data_path / 'mkdocs/out/readthedocs-1.1.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_mkdocs_old_version(self, storage_open, storage_exists):
json_file = data_path / 'mkdocs/in/search_index_old.json'
storage_open.side_effect = self._mock_open(
json_file.open().read()
)
storage_exists.return_value = True
self.version.documentation_type = MKDOCS
self.version.save()
index_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='index.html',
)
versions_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='versions/index.html',
)
parsed_json = [
index_file.processed_json,
versions_file.processed_json,
]
expected_json = json.load(open(data_path / 'mkdocs/out/search_index_old.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_sphinx(self, storage_open, storage_exists):
json_file = data_path / 'sphinx/in/page.json'
html_content = data_path / 'sphinx/in/page.html'
json_content = json.load(json_file.open())
json_content['body'] = html_content.open().read()
storage_open.side_effect = self._mock_open(
json.dumps(json_content)
)
storage_exists.return_value = True
self.version.documentation_type = SPHINX
self.version.save()
page_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='page.html',
)
parsed_json = page_file.processed_json
expected_json = json.load(open(data_path / 'sphinx/out/page.json'))
assert parsed_json == expected_json
@mock.patch.object(BuildMediaFileSystemStorage, 'exists')
@mock.patch.object(BuildMediaFileSystemStorage, 'open')
def test_sphinx_page_without_title(self, storage_open, storage_exists):
json_file = data_path / 'sphinx/in/no-title.json'
html_content = data_path / 'sphinx/in/no-title.html'
json_content = json.load(json_file.open())
json_content['body'] = html_content.open().read()
storage_open.side_effect = self._mock_open(
json.dumps(json_content)
)
storage_exists.return_value = True
self.version.documentation_type = SPHINX
self.version.save()
page_file = get(
HTMLFile,
project=self.project,
version=self.version,
path='no-title.html',
)
parsed_json = page_file.processed_json
expected_json = json.load(open(data_path / 'sphinx/out/no-title.json'))
assert parsed_json == expected_json
|
|
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal, assert_,
assert_allclose, assert_warns)
from pytest import raises as assert_raises
import pytest
from scipy.fft import fft
from scipy.special import sinc
from scipy.signal import (kaiser_beta, kaiser_atten, kaiserord,
                          firwin, firwin2, freqz, remez, firls,
                          minimum_phase)
def test_kaiser_beta():
b = kaiser_beta(58.7)
assert_almost_equal(b, 0.1102 * 50.0)
b = kaiser_beta(22.0)
assert_almost_equal(b, 0.5842 + 0.07886)
b = kaiser_beta(21.0)
assert_equal(b, 0.0)
b = kaiser_beta(10.0)
assert_equal(b, 0.0)
def test_kaiser_atten():
a = kaiser_atten(1, 1.0)
assert_equal(a, 7.95)
a = kaiser_atten(2, 1/np.pi)
assert_equal(a, 2.285 + 7.95)
def test_kaiserord():
assert_raises(ValueError, kaiserord, 1.0, 1.0)
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
assert_equal((numtaps, beta), (2, 0.0))
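# For context, a short sketch of the kaiserord -> firwin design flow that the
# tests above exercise (the ripple/width/cutoff values are illustrative only):
def _example_kaiser_lowpass():
    # 65 dB of attenuation and a transition width of 0.1 (relative to the
    # Nyquist rate) determine the number of taps and the Kaiser beta...
    numtaps, beta = kaiserord(65, 0.1)
    # ...which parameterize a window-designed lowpass with cutoff 0.4.
    return firwin(numtaps, 0.4, window=('kaiser', beta))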
class TestFirwin:
def check_response(self, h, expected_response, tol=.05):
N = len(h)
alpha = 0.5 * (N-1)
m = np.arange(0,N) - alpha # time indices of taps
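        # Evaluate the filter's DTFT directly at each requested frequency
        # (freq is relative to the Nyquist rate, so the digital frequency is
        # w = pi * freq; centering the indices by alpha only removes phase).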
for freq, expected in expected_response:
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
mse = abs(actual-expected)**2
assert_(mse < tol, 'response not as expected, mse=%g > %g'
% (mse, tol))
def test_response(self):
N = 51
f = .5
# increase length just to try even/odd
h = firwin(N, f) # low-pass from 0 to f
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+1, f, window='nuttall') # specific window
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
self.check_response(h, [(.25,0), (.75,1)])
f1, f2, f3, f4 = .2, .4, .6, .8
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
h = firwin(N+4, [f1, f2]) # band-stop filter
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
h = firwin(N+7, 0.1, width=.03) # low-pass
self.check_response(h, [(.05,1), (.75,0)])
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
self.check_response(h, [(.05,0), (.75,1)])
def mse(self, h, bands):
"""Compute mean squared error versus ideal response across frequency
band.
h -- coefficients
bands -- list of (left, right) tuples relative to 1==Nyquist of
passbands
"""
w, H = freqz(h, worN=1024)
f = w/np.pi
passIndicator = np.zeros(len(w), bool)
for left, right in bands:
passIndicator |= (f >= left) & (f < right)
Hideal = np.where(passIndicator, 1, 0)
mse = np.mean(abs(abs(H)-Hideal)**2)
return mse
def test_scaling(self):
"""
For one lowpass, bandpass, and highpass example filter, this test
checks two things:
- the mean squared error over the frequency domain of the unscaled
filter is smaller than the scaled filter (true for rectangular
window)
- the response of the scaled filter is exactly unity at the center
of the first passband
"""
N = 11
cases = [
([.5], True, (0, 1)),
([0.2, .6], False, (.4, 1)),
([.5], False, (1, 1)),
]
for cutoff, pass_zero, expected_response in cases:
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
if len(cutoff) == 1:
if pass_zero:
cutoff = [0] + cutoff
else:
cutoff = cutoff + [1]
assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
'least squares violation')
self.check_response(hs, [expected_response], 1e-12)
class TestFirWinMore:
"""Different author, different style, different tests..."""
def test_lowpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
taps = firwin(ntaps, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
assert_allclose(taps, taps_str)
def test_highpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
# Ensure that ntaps is odd.
ntaps |= 1
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
taps = firwin(ntaps, pass_zero=False, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
assert_allclose(taps, taps_str)
def test_bandpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False)
taps = firwin(ntaps, pass_zero=False, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
0.7-width/2, 0.7+width/2, 0.8, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
assert_allclose(taps, taps_str)
def test_bandstop_multi(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
scale=False)
taps = firwin(ntaps, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
0.5-width/2, 0.5+width/2, 0.65,
0.8-width/2, 0.8+width/2, 0.9, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
decimal=5)
taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
assert_allclose(taps, taps_str)
def test_fs_nyq(self):
"""Test the fs and nyq keywords."""
nyquist = 1000
width = 40.0
relative_width = width/nyquist
ntaps, beta = kaiserord(120, relative_width)
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, fs=2*nyquist)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
700-width/2, 700+width/2, 800, 1000])
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, nyq=nyquist)
assert_allclose(taps2, taps)
def test_bad_cutoff(self):
"""Test that invalid cutoff argument raises ValueError."""
# cutoff values must be greater than 0 and less than 1.
assert_raises(ValueError, firwin, 99, -0.5)
assert_raises(ValueError, firwin, 99, 1.5)
# Don't allow 0 or 1 in cutoff.
assert_raises(ValueError, firwin, 99, [0, 0.5])
assert_raises(ValueError, firwin, 99, [0.5, 1])
# cutoff values must be strictly increasing.
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
# Must have at least one cutoff value.
assert_raises(ValueError, firwin, 99, [])
# 2D array not allowed.
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
# cutoff values must be less than nyq.
assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
def test_even_highpass_raises_value_error(self):
"""Test that attempt to create a highpass filter with an even number
of taps raises a ValueError exception."""
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
assert_raises(ValueError, firwin, 40, [.25, 0.5])
def test_bad_pass_zero(self):
"""Test degenerate pass_zero cases."""
with assert_raises(ValueError, match='pass_zero must be'):
firwin(41, 0.5, pass_zero='foo')
with assert_raises(TypeError, match='cannot be interpreted'):
firwin(41, 0.5, pass_zero=1.)
for pass_zero in ('lowpass', 'highpass'):
with assert_raises(ValueError, match='cutoff must have one'):
firwin(41, [0.5, 0.6], pass_zero=pass_zero)
for pass_zero in ('bandpass', 'bandstop'):
with assert_raises(ValueError, match='must have at least two'):
firwin(41, [0.5], pass_zero=pass_zero)
class TestFirwin2:
def test_invalid_args(self):
# `freq` and `gain` have different lengths.
with assert_raises(ValueError, match='must be of same length'):
firwin2(50, [0, 0.5, 1], [0.0, 1.0])
# `nfreqs` is less than `ntaps`.
with assert_raises(ValueError, match='ntaps must be less than nfreqs'):
firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
# Decreasing value in `freq`
with assert_raises(ValueError, match='must be nondecreasing'):
firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
# Value in `freq` repeated more than once.
with assert_raises(ValueError, match='must not occur more than twice'):
firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0])
# `freq` does not start at 0.0.
with assert_raises(ValueError, match='start with 0'):
firwin2(50, [0.5, 1.0], [0.0, 1.0])
# `freq` does not end at fs/2.
with assert_raises(ValueError, match='end with fs/2'):
firwin2(50, [0.0, 0.5], [0.0, 1.0])
# Value 0 is repeated in `freq`
with assert_raises(ValueError, match='0 must not be repeated'):
firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
# Value fs/2 is repeated in `freq`
with assert_raises(ValueError, match='fs/2 must not be repeated'):
firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0])
# Value in `freq` that is too close to a repeated number
with assert_raises(ValueError, match='cannot contain numbers '
'that are too close'):
firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0],
[1.0, 1.0, 1.0, 0.0, 0.0])
# Type II filter, but the gain at nyquist frequency is not zero.
with assert_raises(ValueError, match='Type II filter'):
firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
# Type III filter, but the gains at nyquist and zero rate are not zero.
with assert_raises(ValueError, match='Type III filter'):
firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
with assert_raises(ValueError, match='Type III filter'):
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
with assert_raises(ValueError, match='Type III filter'):
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True)
# Type IV filter, but the gain at zero rate is not zero.
with assert_raises(ValueError, match='Type IV filter'):
firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
def test01(self):
width = 0.04
beta = 12.0
ntaps = 400
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
freq = [0.0, 0.5, 1.0]
gain = [1.0, 1.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
0.75, 1.0-width/2])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
def test02(self):
width = 0.04
beta = 12.0
# ntaps must be odd for positive gain at Nyquist.
ntaps = 401
# An ideal highpass filter.
freq = [0.0, 0.5, 0.5, 1.0]
gain = [0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test03(self):
width = 0.02
ntaps, beta = kaiserord(120, width)
# ntaps must be odd for positive gain at Nyquist.
ntaps = int(ntaps) | 1
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test04(self):
"""Test firwin2 when window=None."""
ntaps = 5
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
freq = [0.0, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
alpha = 0.5 * (ntaps - 1)
m = np.arange(0, ntaps) - alpha
h = 0.5 * sinc(0.5 * m)
assert_array_almost_equal(h, taps)
def test05(self):
"""Test firwin2 for calculating Type IV filters"""
ntaps = 1500
freq = [0.0, 1.0]
gain = [0.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
freqs, response = freqz(taps, worN=2048)
assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
def test06(self):
"""Test firwin2 for calculating Type III filters"""
ntaps = 1501
freq = [0.0, 0.5, 0.55, 1.0]
gain = [0.0, 0.5, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_equal(taps[ntaps // 2], 0.0)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
freqs, response1 = freqz(taps, worN=2048)
response2 = np.interp(freqs / np.pi, freq, gain)
assert_array_almost_equal(abs(response1), response2, decimal=3)
def test_fs_nyq(self):
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
assert_array_almost_equal(taps1, taps2)
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
assert_array_almost_equal(taps1, taps2)
def test_tuple(self):
taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0))
taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
assert_array_almost_equal(taps1, taps2)
    def test_input_modification(self):
freq1 = np.array([0.0, 0.5, 0.5, 1.0])
freq2 = np.array(freq1)
firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0])
assert_equal(freq1, freq2)
class TestRemez:
def test_bad_args(self):
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
def test_hilbert(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
        # design a unity-gain hilbert bandpass filter from a to 0.5-a
        h = remez(N, [a, 0.5-a], [1], type='hilbert')
# make sure the filter has correct # of taps
assert_(len(h) == N, "Number of Taps")
# make sure it is type III (anti-symmetric tap coefficients)
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
# Since the requested response is symmetric, all even coefficients
# should be zero (or in this case really small)
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = abs(H)
# should have a zero at 0 and pi (in this case close to zero)
assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
# check that the pass band is close to unity
idx = np.logical_and(f > a, f < 0.5-a)
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
def test_compare(self):
# test comparison to MATLAB
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
-0.003530911231040, 0.193140296954975, 0.373400753484939,
0.373400753484939, 0.193140296954975, -0.003530911231040,
-0.075943803756711, -0.041314581814658, 0.024590270518440]
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
assert_allclose(h, k)
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
assert_allclose(h, k)
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
0.002879152556419, 0.016849978528150, -0.043276706138248,
0.073641298245579, -0.103908158578635, 0.129770906801075,
-0.147163447297124, 0.153302248456347, -0.147163447297124,
0.129770906801075, -0.103908158578635, 0.073641298245579,
-0.043276706138248, 0.016849978528150, 0.002879152556419,
-0.014644062687875, 0.018704846485491, -0.038976016082299]
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h)
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h)
class TestFirls:
def test_bad_args(self):
# even numtaps
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
# odd bands
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
# len(bands) != len(desired)
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
# non-monotonic bands
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
# negative desired
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
# len(weight) != len(pairs)
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
# negative weight
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1])
def test_firls(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design a halfband symmetric low-pass filter
        h = firls(N, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0)
# make sure the filter has correct # of taps
assert_equal(len(h), N)
# make sure it is symmetric
midx = (N-1) // 2
assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
# make sure the center tap is 0.5
assert_almost_equal(h[midx], 0.5)
# For halfband symmetric, odd coefficients (except the center)
# should be zero (really small)
hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
assert_array_almost_equal(hodd, 0)
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = np.abs(H)
# check that the pass band is close to unity
idx = np.logical_and(f > 0, f < a)
assert_array_almost_equal(Hmag[idx], 1, decimal=3)
# check that the stop band is close to zero
idx = np.logical_and(f > 0.5-a, f < 0.5)
assert_array_almost_equal(Hmag[idx], 0, decimal=3)
def test_compare(self):
# compare to OCTAVE output
taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
-9.81576747564301e-03, 3.17271686090449e-01,
5.11409425599933e-01, 3.17271686090449e-01,
-9.81576747564301e-03, -1.03354450635036e-01,
-6.26930101730182e-04]
assert_allclose(taps, known_taps)
# compare to MATLAB output
taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
known_taps = [
0.058545300496815, -0.014233383714318, -0.104688258464392,
0.012403323025279, 0.317930861136062, 0.488047220029700,
0.317930861136062, 0.012403323025279, -0.104688258464392,
-0.014233383714318, 0.058545300496815]
assert_allclose(taps, known_taps)
# With linear changes:
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
known_taps = [
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
1.156090832768218]
assert_allclose(taps, known_taps)
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
assert_allclose(taps, known_taps)
with pytest.raises(ValueError, match='between 0 and 1'):
firls(7, [0, 1], [0, 1], nyq=0.5)
def test_rank_deficient(self):
# solve() runs but warns (only sometimes, so here we don't use match)
x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0])
w, h = freqz(x, fs=2.)
assert_allclose(np.abs(h[:2]), 1., atol=1e-5)
assert_allclose(np.abs(h[-2:]), 0., atol=1e-6)
# switch to pinvh (tolerances could be higher with longer
# filters, but using shorter ones is faster computationally and
# the idea is the same)
x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0])
w, h = freqz(x, fs=2.)
mask = w < 0.01
assert mask.sum() > 3
assert_allclose(np.abs(h[mask]), 1., atol=1e-4)
mask = w > 0.99
assert mask.sum() > 3
assert_allclose(np.abs(h[mask]), 0., atol=1e-4)
class TestMinimumPhase:
def test_bad_args(self):
# not enough taps
assert_raises(ValueError, minimum_phase, [1.])
assert_raises(ValueError, minimum_phase, [1., 1.])
assert_raises(ValueError, minimum_phase, np.full(10, 1j))
assert_raises(ValueError, minimum_phase, 'foo')
assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
def test_homomorphic(self):
# check that it can recover frequency responses of arbitrary
# linear-phase filters
# for some cases we can get the actual filter back
h = [1, -1]
h_new = minimum_phase(np.convolve(h, h[::-1]))
assert_allclose(h_new, h, rtol=0.05)
# but in general we only guarantee we get the magnitude back
rng = np.random.RandomState(0)
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
h = rng.randn(n)
h_new = minimum_phase(np.convolve(h, h[::-1]))
assert_allclose(np.abs(fft(h_new)),
np.abs(fft(h)), rtol=1e-4)
def test_hilbert(self):
# compare to MATLAB output of reference implementation
# f=[0 0.3 0.5 1];
# a=[1 1 0 0];
# h=remez(11,f,a);
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
0.077152207480935, -0.129943946349364, -0.059355880509749]
m = minimum_phase(h, 'hilbert')
assert_allclose(m, k, rtol=5e-3)
# f=[0 0.8 0.9 1];
# a=[0 0 1 1];
# h=remez(20,f,a);
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
-0.157957283165866, 0.151739294892963, -0.129293146705090,
0.100787844523204, -0.065832656741252, 0.035361328741024,
-0.014977068692269, -0.158416139047557]
m = minimum_phase(h, 'hilbert', n_fft=2**19)
assert_allclose(m, k, rtol=2e-3)
|
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file at
# http://src.chromium.org/viewvc/chrome/trunk/src/LICENSE
# This file is NOT under GPL.
"""Queries buildbot through the json interface.
"""
__author__ = 'maruel@chromium.org'
__version__ = '1.1'
import code
import datetime
import functools
import logging
import optparse
import time
import urllib
import urllib2
import sys
from find_json import json
import natsort
# These values are buildbot constants used for Build and BuildStep.
# This line was copied from master/buildbot/status/builder.py.
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
## Generic node caching code.
class Node(object):
"""Root class for all nodes in the graph.
  Provides base functionality for any node in the graph, independent of
  whether it has children and of whether its content can be addressed through
  an url or needs to be fetched as part of another node.
"""
# Mostly for help purposes. Used by __str__().
children = []
def __init__(self, parent, url):
self.children = self.children[:]
if url:
self.children.append('url')
url = url.rstrip('/')
if parent is not None:
self.children.append('parent')
self.url = url
self.parent = parent
def __str__(self):
return self.to_string()
def __repr__(self):
"""Embeds key if present."""
key = getattr(self, 'key', None)
if key is not None:
return '<%s key=%s>' % (self.__class__.__name__, key)
return super(Node, self).__repr__()
def to_string(self, maximum=100):
out = ['%s:' % self.__class__.__name__]
    assert 'children' not in self.children
def limit(txt):
txt = str(txt)
if maximum > 0:
if len(txt) > maximum + 2:
txt = txt[:maximum] + '...'
return txt
for k in self.children:
if k == 'parent':
# Avoid infinite recursion.
continue
value = '\n '.join(limit(getattr(self, k)).splitlines())
out.append(' %s: %s' % (k, value))
return '\n'.join(out)
def refresh(self):
"""Refreshes the data."""
self.discard()
return self.cache()
def cache(self): # pragma: no cover
"""Caches the data."""
raise NotImplementedError()
def discard(self): # pragma: no cover
"""Discards cached data.
Pretty much everything is temporary except completed Build.
"""
raise NotImplementedError()
class AddressableBaseDataNode(Node): # pylint: disable=W0223
"""A node that contains a dictionary of data that can be fetched with an url.
  The node is directly addressable. It can also often be fetched by the parent.
"""
children = Node.children + ['data', 'cached_data']
def __init__(self, parent, url, data):
super(AddressableBaseDataNode, self).__init__(parent, url)
self._data = data
@property
def cached_data(self):
return self._data
@property
def data(self):
self.cache()
return self._data
def cache(self):
if self._data is None:
self._data = self._readall()
return True
return False
def discard(self):
self._data = None
def read(self, suburl):
assert self.url, self.__class__.__name__
url = self.url
if suburl:
url = '%s/%s' % (self.url, suburl)
return self.parent.read(url)
def _readall(self):
return self.read('')
class AddressableDataNode(AddressableBaseDataNode): # pylint: disable=W0223
"""Automatically encodes the url."""
def __init__(self, parent, url, data):
super(AddressableDataNode, self).__init__(parent, urllib.quote(url), data)
class NonAddressableDataNode(Node): # pylint: disable=W0223
"""A node that cannot be addressed by an unique url.
The data comes directly from the parent.
"""
def __init__(self, parent, subkey):
super(NonAddressableDataNode, self).__init__(parent, None)
self.subkey = subkey
@property
def cached_data(self):
if self.parent.cached_data is None:
return None
return self.parent.cached_data[self.subkey]
@property
def data(self):
return self.parent.data[self.subkey]
def cache(self):
self.parent.cache()
def discard(self): # pragma: no cover
"""Avoid invalid state when parent recreate the object."""
raise AttributeError('Call parent discard() instead')
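# A hypothetical illustration of the NonAddressableDataNode contract (not part
# of this module): the child simply exposes one key of its parent's data, so
# it needs no url of its own.
#   class FakeParent(object):
#     url = 'http://build/json/builders/linux'
#     cached_data = {'steps': [1, 2, 3]}
#     data = cached_data
#   NonAddressableDataNode(FakeParent(), 'steps').data  # -> [1, 2, 3]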
class VirtualNodeList(Node):
"""Base class for every node that has children.
  Adds partial support for keys and iterator functionality. 'key' can be a
  string or an int. Not to be used directly.
"""
children = Node.children + ['keys', 'cached_children']
def __init__(self, parent, url):
super(VirtualNodeList, self).__init__(parent, url)
# Keeps the keys independently when ordering is needed.
self._is_cached = False
self._has_keys_cached = False
def __contains__(self, key):
"""Enables 'if i in obj:'."""
return key in self.keys
def __iter__(self):
"""Enables 'for i in obj:'. It returns children."""
self.cache_keys()
for key in self.keys:
yield self[key]
def __len__(self):
"""Enables 'len(obj)' to get the number of childs."""
return len(self.keys)
def discard(self):
"""Discards data.
The default behavior is to not invalidate cached keys. The only place where
keys need to be invalidated is with Builds.
"""
self._is_cached = False
self._has_keys_cached = False
@property
def cached_children(self): # pragma: no cover
"""Returns an iterator over the children that are cached."""
raise NotImplementedError()
@property
def keys(self): # pragma: no cover
"""Returns the keys for every children."""
raise NotImplementedError()
def __getitem__(self, key): # pragma: no cover
"""Returns a child, without fetching its data.
The children could be invalid since no verification is done.
"""
raise NotImplementedError()
def cache(self): # pragma: no cover
"""Cache all the children."""
raise NotImplementedError()
def cache_keys(self): # pragma: no cover
"""Cache all children's keys."""
raise NotImplementedError()
class NodeList(VirtualNodeList): # pylint: disable=W0223
"""Adds a cache of the keys."""
def __init__(self, parent, url):
super(NodeList, self).__init__(parent, url)
self._keys = []
@property
def keys(self):
self.cache_keys()
return self._keys
class NonAddressableNodeList(VirtualNodeList): # pylint: disable=W0223
"""A node that contains children but retrieves all its data from its parent.
  I.e. there's no url to get this data directly.
"""
# Child class object for children of this instance. For example, BuildSteps
# has BuildStep children.
_child_cls = None
def __init__(self, parent, subkey):
super(NonAddressableNodeList, self).__init__(parent, None)
self.subkey = subkey
assert (
not isinstance(self._child_cls, NonAddressableDataNode) and
issubclass(self._child_cls, NonAddressableDataNode)), (
self._child_cls.__name__)
@property
def cached_children(self):
if self.parent.cached_data is not None:
for i in xrange(len(self.parent.cached_data[self.subkey])):
yield self[i]
@property
def cached_data(self):
if self.parent.cached_data is None:
return None
return self.parent.data.get(self.subkey, None)
@property
def data(self):
return self.parent.data[self.subkey]
@property
def keys(self): # pragma: no cover
raise NotImplementedError()
def cache(self):
self.parent.cache()
def cache_keys(self):
self.parent.cache()
def discard(self): # pragma: no cover
"""Avoid infinite recursion by having the caller calls the parent's
discard() explicitely.
"""
raise AttributeError('Call parent discard() instead')
def __iter__(self):
"""Enables 'for i in obj:'. It returns children."""
if self.data:
for i in xrange(len(self.data)):
yield self[i]
def __getitem__(self, key):
"""Doesn't cache the value, it's not needed.
TODO(maruel): Cache?
"""
if isinstance(key, int) and key < 0:
key = len(self.data) + key
# pylint: disable=E1102
return self._child_cls(self, key)
class AddressableNodeList(NodeList):
"""A node that has children that can be addressed with an url."""
# Child class object for children of this instance. For example, Builders has
# Builder children and Builds has Build children.
_child_cls = None
def __init__(self, parent, url):
super(AddressableNodeList, self).__init__(parent, url)
self._cache = {}
assert (
not isinstance(self._child_cls, AddressableDataNode) and
issubclass(self._child_cls, AddressableDataNode)), (
self._child_cls.__name__)
@property
def cached_children(self):
for item in self._cache.itervalues():
if item.cached_data is not None:
yield item
def __getitem__(self, key):
"""Enables 'obj[i]'."""
if self._has_keys_cached and not key in self._keys:
raise KeyError(key)
if not key in self._cache:
# Create an empty object.
self._create_obj(key, None)
return self._cache[key]
def cache(self):
if not self._is_cached:
data = self._readall()
for key in sorted(data):
self._create_obj(key, data[key])
self._is_cached = True
self._has_keys_cached = True
def cache_partial(self, children):
"""Caches a partial number of children.
    This method is more efficient since it does a single request for all the
    children instead of one request per child.
    It only grabs objects not already cached.
"""
# pylint: disable=W0212
if not self._is_cached:
to_fetch = [
child for child in children
if not (child in self._cache and self._cache[child].cached_data)
]
if to_fetch:
# Similar to cache(). The only reason to sort is to simplify testing.
params = '&'.join(
'select=%s' % urllib.quote(str(v)) for v in sorted(to_fetch))
data = self.read('?' + params)
for key in sorted(data):
self._create_obj(key, data[key])
def cache_keys(self):
"""Implement to speed up enumeration. Defaults to call cache()."""
if not self._has_keys_cached:
self.cache()
assert self._has_keys_cached
def discard(self):
"""Discards temporary children."""
super(AddressableNodeList, self).discard()
for v in self._cache.itervalues():
v.discard()
def read(self, suburl):
assert self.url, self.__class__.__name__
url = self.url
if suburl:
url = '%s/%s' % (self.url, suburl)
return self.parent.read(url)
def _create_obj(self, key, data):
"""Creates an object of type self._child_cls."""
# pylint: disable=E1102
obj = self._child_cls(self, key, data)
# obj.key and key may be different.
    # No need to override cached data with None.
if data is not None or obj.key not in self._cache:
self._cache[obj.key] = obj
if obj.key not in self._keys:
self._keys.append(obj.key)
def _readall(self):
return self.read('')
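# A minimal sketch (hypothetical build numbers) of what cache_partial()
# saves: fetching builds 12 and 15 in one call issues a single request to
# 'builds/?select=12&select=15' instead of one request per build.
#
#   builder.builds.cache_partial([12, 15])
#   print builder.builds[12].result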
class SubViewNodeList(VirtualNodeList): # pylint: disable=W0223
"""A node that shows a subset of children that comes from another structure.
The node is not addressable.
E.g. the keys are retrieved from parent but the actual data comes from
virtual_parent.
"""
def __init__(self, parent, virtual_parent, subkey):
super(SubViewNodeList, self).__init__(parent, None)
self.subkey = subkey
self.virtual_parent = virtual_parent
assert isinstance(self.parent, AddressableDataNode)
assert isinstance(self.virtual_parent, NodeList)
@property
def cached_children(self):
if self.parent.cached_data is not None:
for item in self.keys:
if item in self.virtual_parent.keys:
child = self[item]
if child.cached_data is not None:
yield child
@property
def keys(self):
self.cache_keys()
return self.parent.data.get(self.subkey, [])
def cache(self):
"""Batch request for each child in a single read request."""
if not self._is_cached:
self.virtual_parent.cache_partial(self.keys)
self._is_cached = True
def cache_keys(self):
if not self._has_keys_cached:
self.parent.cache()
self._has_keys_cached = True
def discard(self):
if self.parent.cached_data is not None:
for child in self.virtual_parent.cached_children:
if child.key in self.keys:
child.discard()
self.parent.discard()
super(SubViewNodeList, self).discard()
def __getitem__(self, key):
"""Makes sure the key is in our key but grab it from the virtual parent."""
return self.virtual_parent[key]
def __iter__(self):
self.cache()
return super(SubViewNodeList, self).__iter__()
###############################################################################
## Buildbot-specific code
class Slave(AddressableDataNode):
children = AddressableDataNode.children + [
'name', 'key', 'connected', 'version']
def __init__(self, parent, name, data):
super(Slave, self).__init__(parent, name, data)
self.name = name
self.key = self.name
# TODO(maruel): Add SlaveBuilders and a 'builders' property.
# TODO(maruel): Add a 'running_builds' property.
@property
def connected(self):
return self.data['connected']
@property
def version(self):
return self.data['version']
class Slaves(AddressableNodeList):
_child_cls = Slave
children = AddressableNodeList.children + ['names']
def __init__(self, parent):
super(Slaves, self).__init__(parent, 'slaves')
@property
def names(self):
return self.keys
class BuilderSlaves(SubViewNodeList):
"""Similar to Slaves but only list slaves connected to a specific builder.
"""
children = SubViewNodeList.children + ['names']
def __init__(self, parent):
super(BuilderSlaves, self).__init__(
parent, parent.parent.parent.slaves, 'slaves')
@property
def names(self):
return self.keys
class BuildStep(NonAddressableDataNode):
children = NonAddressableDataNode.children + [
'name', 'number', 'result', 'simplified_result']
def __init__(self, parent, number):
"""It's already pre-loaded by definition since the data is retrieve via the
Build object.
"""
assert isinstance(number, int)
super(BuildStep, self).__init__(parent, number)
self.number = number
@property
def name(self):
return self.data['name']
@property
def result(self):
result = self.data.get('results')
if result is None:
# results may be 0, in that case with filter=1, the value won't be
# present.
if self.data.get('isFinished'):
result = self.data.get('results', 0)
while isinstance(result, list):
result = result[0]
return result
@property
def simplified_result(self):
"""Returns a simplified 3 state value, True, False or None."""
result = self.result
if result in (SUCCESS, WARNINGS, SKIPPED):
return True
elif result in (FAILURE, EXCEPTION):
return False
assert result is None
return None
class BuildSteps(NonAddressableNodeList):
"""Duplicates keys to support lookup by both step number and step name."""
children = NonAddressableNodeList.children + ['failed']
_child_cls = BuildStep
def __init__(self, parent):
"""It's already pre-loaded by definition since the data is retrieve via the
Build object.
"""
super(BuildSteps, self).__init__(parent, 'steps')
@property
def keys(self):
"""Returns the indexes of the steps.
    It could return the step names but then it wouldn't be ordered."""
return range(len(self.data or []))
@property
def failed(self):
"""Shortcuts that lists the step names of steps that failed."""
return [step.name for step in self if step.simplified_result is False]
def __getitem__(self, key):
"""Accept step name in addition to index number."""
if isinstance(key, basestring):
# It's a string, try to find the corresponding index.
for i, step in enumerate(self.data):
if step['name'] == key:
key = i
break
else:
raise KeyError(key)
return super(BuildSteps, self).__getitem__(key)
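# A hedged usage sketch: BuildSteps supports lookup both by index and by
# step name (the step name 'compile' is illustrative).
#
#   build.steps[0].result     # First step, by index.
#   build.steps['compile']    # The same kind of lookup, by name.
#   build.steps.failed        # Names of the steps that failed.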
class Build(AddressableDataNode):
children = AddressableDataNode.children + [
'key', 'number', 'steps', 'blame', 'reason', 'revision', 'result',
'simplified_result', 'slave', 'properties']
def __init__(self, parent, key, data):
super(Build, self).__init__(parent, str(key), data)
self.number = int(key)
self.key = self.number
self.steps = BuildSteps(self)
@property
def blame(self):
return self.data.get('blame')
@property
def builder(self):
"""Returns the Builder object.
Goes up the hierarchy to find the Buildbot.builders[builder] instance.
"""
return self.parent.parent.parent.parent.builders[self.data['builderName']]
@property
def eta(self):
return self.data.get('eta', 0)
@property
def properties(self):
return self.data['properties']
@property
def reason(self):
return self.data.get('reason')
@property
def result(self):
result = self.data.get('results')
while isinstance(result, list):
result = result[0]
if result is None and self.steps:
# results may be 0, in that case with filter=1, the value won't be
# present.
result = self.steps[-1].result
return result
@property
def revision(self):
return self.data['sourceStamp'].get('revision')
@property
def simplified_result(self):
"""Returns a simplified 3 state value, True, False or None."""
result = self.result
if result in (SUCCESS, WARNINGS, SKIPPED):
return True
elif result in (FAILURE, EXCEPTION):
return False
assert result is None
return None
@property
def slave(self):
"""Returns the Slave object.
Goes up the hierarchy to find the Buildbot.slaves[slave] instance.
"""
return self.parent.parent.parent.parent.slaves[self.data['slave']]
def discard(self):
"""Completed Build isn't discarded."""
if self._data and self.result is None:
assert not self.steps or not self.steps[-1].data.get('isFinished')
self._data = None
class CurrentBuilds(SubViewNodeList):
"""Lists of the current builds."""
def __init__(self, parent):
super(CurrentBuilds, self).__init__(
parent, parent.builds, 'currentBuilds')
class PendingBuilds(AddressableDataNode):
def __init__(self, parent):
super(PendingBuilds, self).__init__(parent, 'pendingBuilds', None)
class Builds(AddressableNodeList):
"""Supports iteration.
  Recommends using .cache() to speed things up when a significant number of
  builds will be iterated over.
"""
_child_cls = Build
def __init__(self, parent):
super(Builds, self).__init__(parent, 'builds')
def __getitem__(self, key):
"""Adds supports for negative reference.
e.g. -1 is the last build, -2 is the previous build before the last one.
"""
key = int(key)
if key < 0:
# Convert negative to positive build number.
self.cache_keys()
key = self._keys[key]
return super(Builds, self).__getitem__(key)
def __iter__(self):
"""Returns in reversed order.
The most recent build is returned first and then in reverse chronological
order, up to the oldest cached build by the server. Older builds can be
accessed but will trigger significantly more I/O so they are not included by
default in the iteration.
"""
self.cache()
return reversed(self._cache.values())
def cache_keys(self):
"""Grabs the keys (build numbers) from the builder."""
if not self._has_keys_cached:
for i in self.parent.data['cachedBuilds']:
i = int(i)
self._cache.setdefault(i, Build(self, i, None))
if i not in self._keys:
self._keys.append(i)
self._has_keys_cached = True
def discard(self):
super(Builds, self).discard()
# Can't keep keys.
self._has_keys_cached = False
def _readall(self):
return self.read('_all')
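# A minimal usage sketch (hypothetical builder name 'linux'): negative
# indices address builds from the end and iteration is most-recent-first.
#
#   builds = buildbot.builders['linux'].builds
#   builds.cache()       # One request for all builds instead of one each.
#   last = builds[-1]    # The most recent build.
#   for build in builds:
#     print build.number, build.result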
class Builder(AddressableDataNode):
children = AddressableDataNode.children + [
'name', 'key', 'builds', 'slaves', 'pending_builds', 'current_builds']
def __init__(self, parent, name, data):
super(Builder, self).__init__(parent, name, data)
self.name = name
self.key = name
self.builds = Builds(self)
self.slaves = BuilderSlaves(self)
self.current_builds = CurrentBuilds(self)
self.pending_builds = PendingBuilds(self)
def discard(self):
super(Builder, self).discard()
self.builds.discard()
self.slaves.discard()
self.current_builds.discard()
class Builders(AddressableNodeList):
"""Root list of builders."""
_child_cls = Builder
def __init__(self, parent):
super(Builders, self).__init__(parent, 'builders')
class Buildbot(AddressableBaseDataNode):
"""If a master restart occurs, this object should be recreated as it caches
data.
"""
# Throttle fetches to not kill the server.
auto_throttle = None
children = AddressableDataNode.children + [
'slaves', 'builders', 'last_fetch']
def __init__(self, url):
super(Buildbot, self).__init__(None, url.rstrip('/') + '/json', None)
self._builders = Builders(self)
self._slaves = Slaves(self)
self.last_fetch = None
@property
def builders(self):
return self._builders
@property
def slaves(self):
return self._slaves
def discard(self):
"""Discards information about Builders and Slaves."""
super(Buildbot, self).discard()
self._builders.discard()
self._slaves.discard()
def read(self, suburl):
if self.auto_throttle:
if self.last_fetch:
delta = datetime.datetime.utcnow() - self.last_fetch
remaining = (datetime.timedelta(seconds=self.auto_throttle) -
delta)
if remaining > datetime.timedelta(seconds=0):
logging.debug('Sleeping for %ss' % remaining)
time.sleep(remaining.seconds)
self.last_fetch = datetime.datetime.utcnow()
url = '%s/%s' % (self.url, suburl)
if '?' in url:
url += '&filter=1'
else:
url += '?filter=1'
logging.info('read(%s)' % suburl)
channel = urllib.urlopen(url)
try:
return json.load(channel)
except ValueError, e:
if '<head><title>404 - No Such Resource</title></head>' in e.doc:
raise urllib2.HTTPError(
url, 404, '%s:\n%s' % (url, e.doc), channel.headers, None)
raise
def _readall(self):
return self.read('project')
###############################################################################
## Controller code
def usage(more):
def hook(fn):
fn.func_usage_more = more
return fn
return hook
def need_buildbot(fn):
"""Post-parse args to create a buildbot object."""
@functools.wraps(fn)
def hook(parser, args, *extra_args, **kwargs):
old_parse_args = parser.parse_args
def new_parse_args(args):
options, args = old_parse_args(args)
if len(args) < 1:
parser.error('Need to pass the root url of the buildbot')
url = args.pop(0)
if not url.startswith('http'):
url = 'http://' + url
buildbot = Buildbot(url)
buildbot.auto_throttle = options.throttle
return options, args, buildbot
parser.parse_args = new_parse_args
# Call the original function with the modified parser.
return fn(parser, args, *extra_args, **kwargs)
hook.func_usage_more = '[options] <url>'
return hook
@need_buildbot
def CMDpending(parser, args):
"""Lists pending jobs."""
parser.add_option(
'-b', '--builder', dest='builders', action='append', default=[],
help='Builders to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
if not options.builders:
options.builders = buildbot.builders.keys
for builder in options.builders:
builder = buildbot.builders[builder]
pending_builds = builder.data.get('pendingBuilds', 0)
if not pending_builds:
continue
print 'Builder %s: %d' % (builder.name, pending_builds)
if not options.quiet:
for pending in builder.pending_builds.data:
if 'revision' in pending['source']:
print ' revision: %s' % pending['source']['revision']
for change in pending['source']['changes']:
print ' change:'
print ' comment: %r' % change['comments'][:50]
print ' who: %s' % change['who']
return 0
@usage('[options] <url> [commands] ...')
@need_buildbot
def CMDrun(parser, args):
"""Runs commands passed as parameters.
When passing commands on the command line, each command will be run as if it
was on its own line.
"""
parser.add_option('-f', '--file', help='Read script from file')
parser.add_option(
'-i', dest='use_stdin', action='store_true', help='Read script on stdin')
# Variable 'buildbot' is not used directly.
# pylint: disable=W0612
options, args, buildbot = parser.parse_args(args)
if (bool(args) + bool(options.use_stdin) + bool(options.file)) != 1:
parser.error('Need to pass only one of: <commands>, -f <file> or -i')
if options.use_stdin:
cmds = sys.stdin.read()
elif options.file:
cmds = open(options.file).read()
else:
cmds = '\n'.join(args)
compiled = compile(cmds, '<cmd line>', 'exec')
eval(compiled, globals(), locals())
return 0
@need_buildbot
def CMDinteractive(parser, args):
"""Runs an interactive shell to run queries."""
_, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
prompt = (
'Buildbot interactive console for "%s".\n'
'Hint: Start with typing: \'buildbot.children\' or '
'\'print str(buildbot)\' to explore.') % buildbot.url[:-len('/json')]
local_vars = {
'buildbot': buildbot,
'b': buildbot,
'natsort': natsort,
}
code.interact(prompt, None, local_vars)
@need_buildbot
def CMDidle(parser, args):
"""Lists idle slaves."""
return find_idle_busy_slaves(parser, args, True)
@need_buildbot
def CMDbusy(parser, args):
"""Lists idle slaves."""
return find_idle_busy_slaves(parser, args, False)
def find_idle_busy_slaves(parser, args, show_idle):
parser.add_option(
'-b', '--builder', dest='builders', action='append', default=[],
help='Builders to filter on')
parser.add_option(
'-s', '--slave', dest='slaves', action='append', default=[],
help='Slaves to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
if not options.builders:
options.builders = buildbot.builders.keys
for builder in options.builders:
builder = buildbot.builders[builder]
if options.slaves:
# Only the subset of slaves connected to the builder.
slaves = list(set(options.slaves).intersection(set(builder.slaves.names)))
if not slaves:
continue
else:
slaves = builder.slaves.names
busy_slaves = [build.slave.name for build in builder.current_builds]
if show_idle:
slaves = natsort.natsorted(set(slaves) - set(busy_slaves))
else:
slaves = natsort.natsorted(set(slaves) & set(busy_slaves))
if options.quiet:
for slave in slaves:
print slave
else:
if slaves:
print 'Builder %s: %s' % (builder.name, ', '.join(slaves))
return 0
def last_failure(buildbot, builders=None, slaves=None, steps=None,
result=FAILURE, no_cache=False):
"""Generator returning Build object that were the last failure with the
specific filters.
"""
builders = builders or buildbot.builders.keys
for builder in builders:
builder = buildbot.builders[builder]
if slaves:
# Only the subset of slaves connected to the builder.
builder_slaves = list(set(slaves).intersection(set(builder.slaves.names)))
if not builder_slaves:
continue
else:
builder_slaves = builder.slaves.names
if not no_cache and len(builder.slaves) > 2:
# Unless you just want the last few builds, it's often faster to
      # fetch the whole thing at once, at the cost of a small hiccup on
# the buildbot.
# TODO(maruel): Cache only N last builds or all builds since
# datetime.
builder.builds.cache()
found = []
for build in builder.builds:
if build.slave.name not in builder_slaves or build.slave.name in found:
continue
found.append(build.slave.name)
if steps:
if any(build.steps[step].result == result for step in steps):
yield build
elif result is None or build.result == result:
yield build
      if len(found) == len(builder_slaves):
# Found all the slaves, quit.
break
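# A hedged example of driving last_failure() directly (the master URL and
# step name are illustrative): yield each build whose 'compile' step failed
# on a slave's most recent run.
#
#   bb = Buildbot('http://build.example.com')
#   for build in last_failure(bb, steps=['compile']):
#     print '%s %d %s' % (build.builder.name, build.number, build.slave.name)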
@need_buildbot
def CMDlast_failure(parser, args):
"""Lists all slaves that failed on that step on their last build.
Example: to find all slaves where their last build was a compile failure,
run with --step compile"""
parser.add_option(
'-S', '--step', dest='steps', action='append', default=[],
help='List all slaves that failed on that step on their last build')
parser.add_option(
'-r', '--result', type='int', default=FAILURE,
help='Build result to filter on')
parser.add_option(
'-b', '--builder', dest='builders', action='append', default=[],
help='Builders to filter on')
parser.add_option(
'-s', '--slave', dest='slaves', action='append', default=[],
help='Slaves to filter on')
parser.add_option(
'-n', '--no_cache', action='store_true',
help='Don\'t load all builds at once')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
if options.steps and options.result is None:
options.result = 2
print_builders = not options.quiet and len(options.builders) != 1
last_builder = None
for build in last_failure(buildbot, builders=options.builders,
slaves=options.slaves, steps=options.steps, result=options.result,
no_cache=options.no_cache):
if print_builders and last_builder != build.builder:
print build.builder.name
last_builder = build.builder
if options.quiet:
if options.slaves:
print '%s: %s' % (build.builder.name, build.slave.name)
else:
print build.slave.name
else:
out = '%d on %s: result:%s blame:%s' % (
build.number, build.slave.name, build.result,
', '.join(build.blame))
if print_builders:
out = ' ' + out
print out
if len(options.steps) != 1:
for step in build.steps:
if step.result not in (0, None):
out = ' %s: r=%s %s' % (
step.data['name'], step.result,
', '.join(step.data['text'])[:40])
if print_builders:
out = ' ' + out
print out
return 0
@need_buildbot
def CMDcurrent(parser, args):
"""Lists current jobs."""
parser.add_option(
'-b', '--builder', dest='builders', action='append', default=[],
help='Builders to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
if not options.builders:
options.builders = buildbot.builders.keys
for builder in options.builders:
builder = buildbot.builders[builder]
if not options.quiet and builder.current_builds:
print builder.name
for build in builder.current_builds:
if options.quiet:
print build.slave.name
else:
out = '%d: slave=%s' % (build.number, build.slave.name)
if build.eta:
out += ' eta=%.0f' % build.eta
if build.blame:
out += ' blame=' + ', '.join(build.blame)
print out
return 0
def gen_parser():
"""Returns an OptionParser instance with default options.
It should be then processed with gen_usage() before being used.
"""
parser = optparse.OptionParser(
version=__version__)
# Remove description formatting
parser.format_description = lambda x: parser.description
# Add common parsing.
old_parser_args = parser.parse_args
def Parse(*args, **kwargs):
options, args = old_parser_args(*args, **kwargs)
if options.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
elif options.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
return options, args
parser.parse_args = Parse
parser.add_option(
'-v', '--verbose', action='count',
      help='Use multiple times to increase the logging level')
parser.add_option(
'-q', '--quiet', action='store_true',
help='Reduces the output to be parsed by scripts, independent of -v')
parser.add_option(
'--throttle', type='float',
help='Minimum delay to sleep between requests')
return parser
###############################################################################
## Generic subcommand handling code
def Command(name):
return getattr(sys.modules[__name__], 'CMD' + name, None)
@usage('<command>')
def CMDhelp(parser, args):
"""Print list of commands or use 'help <command>'."""
_, args = parser.parse_args(args)
if len(args) == 1:
return main(args + ['--help'])
parser.print_help()
return 0
def gen_usage(parser, command):
"""Modifies an OptionParser object with the command's documentation.
The documentation is taken from the function's docstring.
"""
obj = Command(command)
more = getattr(obj, 'func_usage_more')
# OptParser.description prefer nicely non-formatted strings.
parser.description = obj.__doc__ + '\n'
parser.set_usage('usage: %%prog %s %s' % (command, more))
def main(args=None):
# Do it late so all commands are listed.
# pylint: disable=E1101
CMDhelp.__doc__ += '\n\nCommands are:\n' + '\n'.join(
' %-12s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n', 1)[0])
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD'))
parser = gen_parser()
if args is None:
args = sys.argv[1:]
if args:
command = Command(args[0])
if command:
# "fix" the usage and the description now that we know the subcommand.
gen_usage(parser, args[0])
return command(parser, args[1:])
# Not a known command. Default to help.
gen_usage(parser, 'help')
return CMDhelp(parser, args)
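# Example invocations, assuming this module is saved as buildbot_query.py
# (the file name is illustrative):
#   buildbot_query.py pending build.example.com -b linux
#   buildbot_query.py busy build.example.com
#   buildbot_query.py last_failure build.example.com -S compile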
if __name__ == '__main__':
sys.exit(main())
|
|
"""
Support for interfacing with the XBMC/Kodi JSON-RPC API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.kodi/
"""
import asyncio
from collections import OrderedDict
from functools import wraps
import logging
import re
import socket
import urllib
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN, MEDIA_PLAYER_SCHEMA, MEDIA_TYPE_CHANNEL, MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO,
PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP,
MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_PROXY_SSL,
CONF_TIMEOUT, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, STATE_IDLE,
STATE_OFF, STATE_PAUSED, STATE_PLAYING)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import script
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.template import Template
from homeassistant.util.yaml import dump
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['jsonrpc-async==0.6', 'jsonrpc-websocket==0.6']
_LOGGER = logging.getLogger(__name__)
EVENT_KODI_CALL_METHOD_RESULT = 'kodi_call_method_result'
CONF_TCP_PORT = 'tcp_port'
CONF_TURN_ON_ACTION = 'turn_on_action'
CONF_TURN_OFF_ACTION = 'turn_off_action'
CONF_ENABLE_WEBSOCKET = 'enable_websocket'
DEFAULT_NAME = 'Kodi'
DEFAULT_PORT = 8080
DEFAULT_TCP_PORT = 9090
DEFAULT_TIMEOUT = 5
DEFAULT_PROXY_SSL = False
DEFAULT_ENABLE_WEBSOCKET = True
DEPRECATED_TURN_OFF_ACTIONS = {
None: None,
'quit': 'Application.Quit',
'hibernate': 'System.Hibernate',
'suspend': 'System.Suspend',
'reboot': 'System.Reboot',
'shutdown': 'System.Shutdown'
}
# https://github.com/xbmc/xbmc/blob/master/xbmc/media/MediaType.h
MEDIA_TYPES = {
'music': MEDIA_TYPE_MUSIC,
'artist': MEDIA_TYPE_MUSIC,
'album': MEDIA_TYPE_MUSIC,
'song': MEDIA_TYPE_MUSIC,
'video': MEDIA_TYPE_VIDEO,
'set': MEDIA_TYPE_PLAYLIST,
'musicvideo': MEDIA_TYPE_VIDEO,
'movie': MEDIA_TYPE_MOVIE,
'tvshow': MEDIA_TYPE_TVSHOW,
'season': MEDIA_TYPE_TVSHOW,
'episode': MEDIA_TYPE_TVSHOW,
# Type 'channel' is used for radio or tv streams from pvr
'channel': MEDIA_TYPE_CHANNEL,
    # Type 'audio' is used for audio media that Kodi couldn't scrobble
'audio': MEDIA_TYPE_MUSIC,
}
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_SHUFFLE_SET | \
SUPPORT_PLAY | SUPPORT_VOLUME_STEP
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port,
vol.Optional(CONF_PROXY_SSL, default=DEFAULT_PROXY_SSL): cv.boolean,
vol.Optional(CONF_TURN_ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_TURN_OFF_ACTION):
vol.Any(cv.SCRIPT_SCHEMA, vol.In(DEPRECATED_TURN_OFF_ACTIONS)),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Inclusive(CONF_USERNAME, 'auth'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string,
vol.Optional(CONF_ENABLE_WEBSOCKET, default=DEFAULT_ENABLE_WEBSOCKET):
cv.boolean,
})
SERVICE_ADD_MEDIA = 'kodi_add_to_playlist'
SERVICE_CALL_METHOD = 'kodi_call_method'
DATA_KODI = 'kodi'
ATTR_MEDIA_TYPE = 'media_type'
ATTR_MEDIA_NAME = 'media_name'
ATTR_MEDIA_ARTIST_NAME = 'artist_name'
ATTR_MEDIA_ID = 'media_id'
ATTR_METHOD = 'method'
MEDIA_PLAYER_ADD_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_TYPE): cv.string,
vol.Optional(ATTR_MEDIA_ID): cv.string,
vol.Optional(ATTR_MEDIA_NAME): cv.string,
vol.Optional(ATTR_MEDIA_ARTIST_NAME): cv.string,
})
MEDIA_PLAYER_CALL_METHOD_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_METHOD): cv.string,
}, extra=vol.ALLOW_EXTRA)
SERVICE_TO_METHOD = {
SERVICE_ADD_MEDIA: {
'method': 'async_add_media_to_playlist',
'schema': MEDIA_PLAYER_ADD_MEDIA_SCHEMA},
SERVICE_CALL_METHOD: {
'method': 'async_call_method',
'schema': MEDIA_PLAYER_CALL_METHOD_SCHEMA},
}
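# A hedged sketch of calling the kodi_call_method service from a Home
# Assistant automation (the entity id and notification text are
# illustrative):
#
#   service: media_player.kodi_call_method
#   data:
#     entity_id: media_player.kodi_living_room
#     method: GUI.ShowNotification
#     title: Hello
#     message: Sent from Home Assistant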
def _check_deprecated_turn_off(hass, turn_off_action):
"""Create an equivalent script for old turn off actions."""
if isinstance(turn_off_action, str):
method = DEPRECATED_TURN_OFF_ACTIONS[turn_off_action]
new_config = OrderedDict(
[('service', '{}.{}'.format(DOMAIN, SERVICE_CALL_METHOD)),
('data_template', OrderedDict(
[('entity_id', '{{ entity_id }}'),
('method', method)]))])
example_conf = dump(OrderedDict(
[(CONF_TURN_OFF_ACTION, new_config)]))
_LOGGER.warning(
"The '%s' action for turn off Kodi is deprecated and "
"will cease to function in a future release. You need to "
"change it for a generic Home Assistant script sequence, "
"which is, for this turn_off action, like this:\n%s",
turn_off_action, example_conf)
new_config['data_template'] = OrderedDict(
[(key, Template(value, hass))
for key, value in new_config['data_template'].items()])
turn_off_action = [new_config]
return turn_off_action
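# For example, turn_off_action: 'quit' is rewritten by the function above
# into the equivalent of this script sequence before being wrapped in a
# Script object:
#
#   turn_off_action:
#     service: media_player.kodi_call_method
#     data_template:
#       entity_id: '{{ entity_id }}'
#       method: Application.Quit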
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Kodi platform."""
if DATA_KODI not in hass.data:
hass.data[DATA_KODI] = dict()
unique_id = None
# Is this a manual configuration?
if discovery_info is None:
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
tcp_port = config.get(CONF_TCP_PORT)
encryption = config.get(CONF_PROXY_SSL)
websocket = config.get(CONF_ENABLE_WEBSOCKET)
else:
name = "{} ({})".format(DEFAULT_NAME, discovery_info.get('hostname'))
host = discovery_info.get('host')
port = discovery_info.get('port')
tcp_port = DEFAULT_TCP_PORT
encryption = DEFAULT_PROXY_SSL
websocket = DEFAULT_ENABLE_WEBSOCKET
properties = discovery_info.get('properties')
if properties is not None:
unique_id = properties.get('uuid', None)
# Only add a device once, so discovered devices do not override manual
# config.
ip_addr = socket.gethostbyname(host)
if ip_addr in hass.data[DATA_KODI]:
return
# If we got an unique id, check that it does not exist already.
    # This is necessary as netdisco does not deterministically return the same
# advertisement when the service is offered over multiple IP addresses.
if unique_id is not None:
for device in hass.data[DATA_KODI].values():
if device.unique_id == unique_id:
return
entity = KodiDevice(
hass,
name=name,
host=host, port=port, tcp_port=tcp_port, encryption=encryption,
username=config.get(CONF_USERNAME),
password=config.get(CONF_PASSWORD),
turn_on_action=config.get(CONF_TURN_ON_ACTION),
turn_off_action=config.get(CONF_TURN_OFF_ACTION),
timeout=config.get(CONF_TIMEOUT), websocket=websocket,
unique_id=unique_id)
hass.data[DATA_KODI][ip_addr] = entity
async_add_entities([entity], update_before_add=True)
async def async_service_handler(service):
"""Map services to methods on MediaPlayerDevice."""
method = SERVICE_TO_METHOD.get(service.service)
if not method:
return
params = {key: value for key, value in service.data.items()
if key != 'entity_id'}
entity_ids = service.data.get('entity_id')
if entity_ids:
target_players = [player
for player in hass.data[DATA_KODI].values()
if player.entity_id in entity_ids]
else:
target_players = hass.data[DATA_KODI].values()
update_tasks = []
for player in target_players:
await getattr(player, method['method'])(**params)
for player in target_players:
if player.should_poll:
update_coro = player.async_update_ha_state(True)
update_tasks.append(update_coro)
if update_tasks:
await asyncio.wait(update_tasks, loop=hass.loop)
if hass.services.has_service(DOMAIN, SERVICE_ADD_MEDIA):
return
for service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service]['schema']
hass.services.async_register(
DOMAIN, service, async_service_handler,
schema=schema)
def cmd(func):
"""Catch command exceptions."""
@wraps(func)
async def wrapper(obj, *args, **kwargs):
"""Wrap all command methods."""
import jsonrpc_base
try:
await func(obj, *args, **kwargs)
except jsonrpc_base.jsonrpc.TransportError as exc:
# If Kodi is off, we expect calls to fail.
if obj.state == STATE_OFF:
log_function = _LOGGER.info
else:
log_function = _LOGGER.error
log_function("Error calling %s on entity %s: %r",
func.__name__, obj.entity_id, exc)
return wrapper
class KodiDevice(MediaPlayerDevice):
"""Representation of a XBMC/Kodi device."""
def __init__(self, hass, name, host, port, tcp_port, encryption=False,
username=None, password=None,
turn_on_action=None, turn_off_action=None,
timeout=DEFAULT_TIMEOUT, websocket=True,
unique_id=None):
"""Initialize the Kodi device."""
import jsonrpc_async
import jsonrpc_websocket
self.hass = hass
self._name = name
self._unique_id = unique_id
self._media_position_updated_at = None
self._media_position = None
kwargs = {
'timeout': timeout,
'session': async_get_clientsession(hass),
}
if username is not None:
kwargs['auth'] = aiohttp.BasicAuth(username, password)
image_auth_string = "{}:{}@".format(username, password)
else:
image_auth_string = ""
http_protocol = 'https' if encryption else 'http'
ws_protocol = 'wss' if encryption else 'ws'
self._http_url = '{}://{}:{}/jsonrpc'.format(http_protocol, host, port)
self._image_url = '{}://{}{}:{}/image'.format(
http_protocol, image_auth_string, host, port)
self._ws_url = '{}://{}:{}/jsonrpc'.format(ws_protocol, host, tcp_port)
self._http_server = jsonrpc_async.Server(self._http_url, **kwargs)
if websocket:
# Setup websocket connection
self._ws_server = jsonrpc_websocket.Server(self._ws_url, **kwargs)
# Register notification listeners
self._ws_server.Player.OnPause = self.async_on_speed_event
self._ws_server.Player.OnPlay = self.async_on_speed_event
self._ws_server.Player.OnAVStart = self.async_on_speed_event
self._ws_server.Player.OnAVChange = self.async_on_speed_event
self._ws_server.Player.OnResume = self.async_on_speed_event
self._ws_server.Player.OnSpeedChanged = self.async_on_speed_event
self._ws_server.Player.OnSeek = self.async_on_speed_event
self._ws_server.Player.OnStop = self.async_on_stop
self._ws_server.Application.OnVolumeChanged = \
self.async_on_volume_changed
self._ws_server.System.OnQuit = self.async_on_quit
self._ws_server.System.OnRestart = self.async_on_quit
self._ws_server.System.OnSleep = self.async_on_quit
def on_hass_stop(event):
"""Close websocket connection when hass stops."""
self.hass.async_create_task(self._ws_server.close())
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, on_hass_stop)
else:
self._ws_server = None
# Script creation for the turn on/off config options
if turn_on_action is not None:
turn_on_action = script.Script(
self.hass, turn_on_action,
"{} turn ON script".format(self.name),
self.async_update_ha_state(True))
if turn_off_action is not None:
turn_off_action = script.Script(
self.hass, _check_deprecated_turn_off(hass, turn_off_action),
"{} turn OFF script".format(self.name))
self._turn_on_action = turn_on_action
self._turn_off_action = turn_off_action
self._enable_websocket = websocket
self._players = list()
self._properties = {}
self._item = {}
self._app_properties = {}
@callback
def async_on_speed_event(self, sender, data):
"""Handle player changes between playing and paused."""
self._properties['speed'] = data['player']['speed']
        if 'id' not in data['item']:
# If no item id is given, perform a full update
force_refresh = True
else:
# If a new item is playing, force a complete refresh
force_refresh = data['item']['id'] != self._item.get('id')
self.async_schedule_update_ha_state(force_refresh)
@callback
def async_on_stop(self, sender, data):
"""Handle the stop of the player playback."""
# Prevent stop notifications which are sent after quit notification
if self._players is None:
return
self._players = []
self._properties = {}
self._item = {}
self._media_position_updated_at = None
self._media_position = None
self.async_schedule_update_ha_state()
@callback
def async_on_volume_changed(self, sender, data):
"""Handle the volume changes."""
self._app_properties['volume'] = data['volume']
self._app_properties['muted'] = data['muted']
self.async_schedule_update_ha_state()
@callback
def async_on_quit(self, sender, data):
"""Reset the player state on quit action."""
self._players = None
self._properties = {}
self._item = {}
self._app_properties = {}
self.hass.async_create_task(self._ws_server.close())
async def _get_players(self):
"""Return the active player objects or None."""
import jsonrpc_base
try:
return await self.server.Player.GetActivePlayers()
except jsonrpc_base.jsonrpc.TransportError:
if self._players is not None:
_LOGGER.info("Unable to fetch kodi data")
_LOGGER.debug("Unable to fetch kodi data", exc_info=True)
return None
@property
def unique_id(self):
"""Return the unique id of the device."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
if self._players is None:
return STATE_OFF
if not self._players:
return STATE_IDLE
if self._properties['speed'] == 0:
return STATE_PAUSED
return STATE_PLAYING
async def async_ws_connect(self):
"""Connect to Kodi via websocket protocol."""
import jsonrpc_base
try:
ws_loop_future = await self._ws_server.ws_connect()
except jsonrpc_base.jsonrpc.TransportError:
_LOGGER.info("Unable to connect to Kodi via websocket")
_LOGGER.debug(
"Unable to connect to Kodi via websocket", exc_info=True)
return
async def ws_loop_wrapper():
"""Catch exceptions from the websocket loop task."""
try:
await ws_loop_future
except jsonrpc_base.TransportError:
# Kodi abruptly ends ws connection when exiting. We will try
# to reconnect on the next poll.
pass
# Update HA state after Kodi disconnects
self.async_schedule_update_ha_state()
# Create a task instead of adding a tracking job, since this task will
# run until the websocket connection is closed.
self.hass.loop.create_task(ws_loop_wrapper())
async def async_update(self):
"""Retrieve latest state."""
self._players = await self._get_players()
if self._players is None:
self._properties = {}
self._item = {}
self._app_properties = {}
return
if self._enable_websocket and not self._ws_server.connected:
self.hass.async_create_task(self.async_ws_connect())
self._app_properties = \
await self.server.Application.GetProperties(
['volume', 'muted']
)
if self._players:
player_id = self._players[0]['playerid']
assert isinstance(player_id, int)
self._properties = await self.server.Player.GetProperties(
player_id,
['time', 'totaltime', 'speed', 'live']
)
position = self._properties['time']
if self._media_position != position:
self._media_position_updated_at = dt_util.utcnow()
self._media_position = position
self._item = (await self.server.Player.GetItem(
player_id,
['title', 'file', 'uniqueid', 'thumbnail', 'artist',
'albumartist', 'showtitle', 'album', 'season', 'episode']
))['item']
else:
self._properties = {}
self._item = {}
self._app_properties = {}
self._media_position = None
self._media_position_updated_at = None
@property
def server(self):
"""Active server for json-rpc requests."""
if self._enable_websocket and self._ws_server.connected:
return self._ws_server
return self._http_server
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return not (self._enable_websocket and self._ws_server.connected)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if 'volume' in self._app_properties:
return self._app_properties['volume'] / 100.0
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._app_properties.get('muted')
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._item.get('uniqueid', None)
@property
def media_content_type(self):
"""Content type of current playing media.
If the media type cannot be detected, the player type is used.
"""
if MEDIA_TYPES.get(self._item.get('type')) is None and self._players:
return MEDIA_TYPES.get(self._players[0]['type'])
return MEDIA_TYPES.get(self._item.get('type'))
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._properties.get('live'):
return None
total_time = self._properties.get('totaltime')
if total_time is None:
return None
return (
total_time['hours'] * 3600 +
total_time['minutes'] * 60 +
total_time['seconds'])
@property
def media_position(self):
"""Position of current playing media in seconds."""
time = self._properties.get('time')
if time is None:
return None
return (
time['hours'] * 3600 +
time['minutes'] * 60 +
time['seconds'])
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
return self._media_position_updated_at
@property
def media_image_url(self):
"""Image url of current playing media."""
thumbnail = self._item.get('thumbnail')
if thumbnail is None:
return None
url_components = urllib.parse.urlparse(thumbnail)
if url_components.scheme == 'image':
return '{}/{}'.format(
self._image_url,
urllib.parse.quote_plus(thumbnail))
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
item = self._item
return item.get('title') or item.get('label') or item.get('file')
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return self._item.get('showtitle')
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return self._item.get('season')
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return self._item.get('episode')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._item.get('album')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
artists = self._item.get('artist', [])
if artists:
return artists[0]
return None
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
artists = self._item.get('albumartist', [])
if artists:
return artists[0]
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = SUPPORT_KODI
if self._turn_on_action is not None:
supported_features |= SUPPORT_TURN_ON
if self._turn_off_action is not None:
supported_features |= SUPPORT_TURN_OFF
return supported_features
@cmd
async def async_turn_on(self):
"""Execute turn_on_action to turn on media player."""
if self._turn_on_action is not None:
await self._turn_on_action.async_run(
variables={"entity_id": self.entity_id})
else:
_LOGGER.warning("turn_on requested but turn_on_action is none")
@cmd
async def async_turn_off(self):
"""Execute turn_off_action to turn off media player."""
if self._turn_off_action is not None:
await self._turn_off_action.async_run(
variables={"entity_id": self.entity_id})
else:
_LOGGER.warning("turn_off requested but turn_off_action is none")
@cmd
async def async_volume_up(self):
"""Volume up the media player."""
assert (
await self.server.Input.ExecuteAction('volumeup')) == 'OK'
@cmd
async def async_volume_down(self):
"""Volume down the media player."""
assert (
await self.server.Input.ExecuteAction('volumedown')) == 'OK'
@cmd
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
"""
return self.server.Application.SetVolume(int(volume * 100))
@cmd
def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.server.Application.SetMute(mute)
async def async_set_play_state(self, state):
"""Handle play/pause/toggle."""
players = await self._get_players()
if players is not None and players:
await self.server.Player.PlayPause(
players[0]['playerid'], state)
@cmd
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state('toggle')
@cmd
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(True)
@cmd
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(False)
@cmd
async def async_media_stop(self):
"""Stop the media player."""
players = await self._get_players()
if players:
await self.server.Player.Stop(players[0]['playerid'])
async def _goto(self, direction):
"""Handle for previous/next track."""
players = await self._get_players()
if players:
if direction == 'previous':
# First seek to position 0. Kodi goes to the beginning of the
# current track if the current track is not at the beginning.
await self.server.Player.Seek(players[0]['playerid'], 0)
await self.server.Player.GoTo(
players[0]['playerid'], direction)
@cmd
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('next')
@cmd
def async_media_previous_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('previous')
@cmd
async def async_media_seek(self, position):
"""Send seek command."""
players = await self._get_players()
time = {}
time['milliseconds'] = int((position % 1) * 1000)
position = int(position)
time['seconds'] = int(position % 60)
position /= 60
time['minutes'] = int(position % 60)
position /= 60
time['hours'] = int(position)
if players:
await self.server.Player.Seek(players[0]['playerid'], time)
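    # Worked example for the conversion above: position=3725.5 seconds
    # becomes {'hours': 1, 'minutes': 2, 'seconds': 5, 'milliseconds': 500},
    # i.e. 1:02:05.500.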
@cmd
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player.
This method must be run in the event loop and returns a coroutine.
"""
if media_type == "CHANNEL":
return self.server.Player.Open(
{"item": {"channelid": int(media_id)}})
if media_type == "PLAYLIST":
return self.server.Player.Open(
{"item": {"playlistid": int(media_id)}})
return self.server.Player.Open(
{"item": {"file": str(media_id)}})
async def async_set_shuffle(self, shuffle):
"""Set shuffle mode, for the first player."""
if not self._players:
raise RuntimeError("Error: No active player.")
await self.server.Player.SetShuffle(
{"playerid": self._players[0]['playerid'], "shuffle": shuffle})
async def async_call_method(self, method, **kwargs):
"""Run Kodi JSONRPC API method with params."""
import jsonrpc_base
_LOGGER.debug("Run API method %s, kwargs=%s", method, kwargs)
result_ok = False
try:
result = await getattr(self.server, method)(**kwargs)
result_ok = True
except jsonrpc_base.jsonrpc.ProtocolError as exc:
result = exc.args[2]['error']
_LOGGER.error("Run API method %s.%s(%s) error: %s",
self.entity_id, method, kwargs, result)
except jsonrpc_base.jsonrpc.TransportError:
result = None
_LOGGER.warning("TransportError trying to run API method "
"%s.%s(%s)", self.entity_id, method, kwargs)
if isinstance(result, dict):
event_data = {'entity_id': self.entity_id,
'result': result,
'result_ok': result_ok,
'input': {'method': method, 'params': kwargs}}
_LOGGER.debug("EVENT kodi_call_method_result: %s", event_data)
self.hass.bus.async_fire(EVENT_KODI_CALL_METHOD_RESULT,
event_data=event_data)
return result
async def async_add_media_to_playlist(
self, media_type, media_id=None, media_name='ALL', artist_name=''):
"""Add a media to default playlist (i.e. playlistid=0).
First the media type must be selected, then
the media can be specified in terms of id or
name and optionally artist name.
All the albums of an artist can be added with
media_name="ALL"
"""
import jsonrpc_base
params = {"playlistid": 0}
if media_type == "SONG":
if media_id is None:
media_id = await self.async_find_song(
media_name, artist_name)
if media_id:
params["item"] = {"songid": int(media_id)}
elif media_type == "ALBUM":
if media_id is None:
if media_name == "ALL":
await self.async_add_all_albums(artist_name)
return
media_id = await self.async_find_album(
media_name, artist_name)
if media_id:
params["item"] = {"albumid": int(media_id)}
else:
raise RuntimeError("Unrecognized media type.")
if media_id is not None:
try:
await self.server.Playlist.Add(params)
except jsonrpc_base.jsonrpc.ProtocolError as exc:
result = exc.args[2]['error']
_LOGGER.error("Run API method %s.Playlist.Add(%s) error: %s",
self.entity_id, media_type, result)
except jsonrpc_base.jsonrpc.TransportError:
_LOGGER.warning("TransportError trying to add playlist to %s",
self.entity_id)
else:
_LOGGER.warning("No media detected for Playlist.Add")
async def async_add_all_albums(self, artist_name):
"""Add all albums of an artist to default playlist (i.e. playlistid=0).
The artist is specified in terms of name.
"""
artist_id = await self.async_find_artist(artist_name)
albums = await self.async_get_albums(artist_id)
for alb in albums['albums']:
await self.server.Playlist.Add(
{"playlistid": 0, "item": {"albumid": int(alb['albumid'])}})
async def async_clear_playlist(self):
"""Clear default playlist (i.e. playlistid=0)."""
        return await self.server.Playlist.Clear({"playlistid": 0})
async def async_get_artists(self):
"""Get artists list."""
return await self.server.AudioLibrary.GetArtists()
async def async_get_albums(self, artist_id=None):
"""Get albums list."""
if artist_id is None:
return await self.server.AudioLibrary.GetAlbums()
return (await self.server.AudioLibrary.GetAlbums(
{"filter": {"artistid": int(artist_id)}}))
async def async_find_artist(self, artist_name):
"""Find artist by name."""
artists = await self.async_get_artists()
try:
out = self._find(
artist_name, [a['artist'] for a in artists['artists']])
return artists['artists'][out[0][0]]['artistid']
except KeyError:
_LOGGER.warning("No artists were found: %s", artist_name)
return None
async def async_get_songs(self, artist_id=None):
"""Get songs list."""
if artist_id is None:
return await self.server.AudioLibrary.GetSongs()
return (await self.server.AudioLibrary.GetSongs(
{"filter": {"artistid": int(artist_id)}}))
async def async_find_song(self, song_name, artist_name=''):
"""Find song by name and optionally artist name."""
artist_id = None
if artist_name != '':
artist_id = await self.async_find_artist(artist_name)
songs = await self.async_get_songs(artist_id)
if songs['limits']['total'] == 0:
return None
out = self._find(song_name, [a['label'] for a in songs['songs']])
return songs['songs'][out[0][0]]['songid']
async def async_find_album(self, album_name, artist_name=''):
"""Find album by name and optionally artist name."""
artist_id = None
if artist_name != '':
artist_id = await self.async_find_artist(artist_name)
albums = await self.async_get_albums(artist_id)
try:
out = self._find(
album_name, [a['label'] for a in albums['albums']])
return albums['albums'][out[0][0]]['albumid']
except KeyError:
_LOGGER.warning("No albums were found with artist: %s, album: %s",
artist_name, album_name)
return None
@staticmethod
def _find(key_word, words):
key_word = key_word.split(' ')
patt = [re.compile(
'(^| )' + k + '( |$)', re.IGNORECASE) for k in key_word]
out = [[i, 0] for i in range(len(words))]
for i in range(len(words)):
mtc = [p.search(words[i]) for p in patt]
rate = [m is not None for m in mtc].count(True)
out[i][1] = rate
        return sorted(out, key=lambda entry: entry[1], reverse=True)
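    # A worked example of the scoring above (the labels are illustrative):
    #
    #   KodiDevice._find('dark moon',
    #                    ['The Dark Side of the Moon', 'Dark Star'])
    #   -> [[0, 2], [1, 1]]
    #
    # Each whole-word keyword match adds one to an entry's score, and the
    # entries come back sorted by score, best match first.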
|
|
#!/usr/bin/env python
# -*- test-case-name: calendarserver.tools.test.test_export -*-
##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
This tool reads calendar data from a series of inputs and generates a single
iCalendar file which can be opened in many calendar applications.
This can be used to quickly create an iCalendar file from a user's calendars.
This tool requires access to the calendar server's configuration and data
storage; it does not operate by talking to the server via the network. It
therefore does not apply any of the access restrictions that the server would.
As such, one should be mindful that data exported via this tool may be sensitive.
Please also note that this is not an appropriate tool for backups, as there is
data associated with users and calendars beyond the iCalendar as visible to the
owner of that calendar, including DAV properties, information about sharing, and
per-user data such as alarms.
"""
from __future__ import print_function
import itertools
import os
import shutil
import sys
from calendarserver.tools.cmdline import utilityMain, WorkerService
from twext.enterprise.dal.syntax import Select
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.text import wordWrap
from twisted.python.usage import Options, UsageError
from twistedcaldav import customxml
from twistedcaldav.ical import Component, Property
from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
from txdav.base.propertystore.base import PropertyName
from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
from txdav.caldav.datastore.sql import Calendar
from txdav.common.datastore.sql_tables import schema
from txdav.xml import element as davxml
log = Logger()
def usage(e=None):
if e:
print(e)
print("")
try:
ExportOptions().opt_help()
except SystemExit:
pass
if e:
sys.exit(64)
else:
sys.exit(0)
description = '\n'.join(
wordWrap(
"""
Usage: calendarserver_export [options] [input specifiers]\n
""" + __doc__,
int(os.environ.get('COLUMNS', '80'))
)
)
class ExportOptions(Options):
"""
Command-line options for 'calendarserver_export'
@ivar exporters: a list of L{DirectoryExporter} objects which can identify the
calendars to export, given a directory service. This list is built by
parsing --record and --collection options.
"""
synopsis = description
optFlags = [
['debug', 'D', "Debug logging."],
]
optParameters = [
['config', 'f', DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
]
def __init__(self):
super(ExportOptions, self).__init__()
self.exporters = []
self.outputName = '-'
self.outputDirectoryName = None
self.exportAll = False
self.exportAllType = "VEVENT"
self.convertToMailto = False
def opt_uid(self, uid):
"""
Add a calendar home directly by its UID (which is usually a directory
service's GUID).
"""
self.exporters.append(UIDExporter(uid))
def opt_record(self, recordName):
"""
Add a directory record's calendar home (format: 'recordType:shortName').
"""
recordType, shortName = recordName.split(":", 1)
self.exporters.append(DirectoryExporter(recordType, shortName))
opt_r = opt_record
def opt_collection(self, collectionName):
"""
Add a calendar collection. This option must be passed after --record
        (or a synonym, like --user). For example, to export user1's calendars
called 'meetings' and 'team', invoke 'calendarserver_export --user=user1
--collection=meetings --collection=team'.
"""
self.exporters[-1].collections.append(collectionName)
opt_c = opt_collection
def opt_directory(self, dirname):
"""
Specify output directory path (works with calendars and contacts).
"""
self.outputDirectoryName = dirname
opt_d = opt_directory
def opt_output(self, filename):
"""
Specify output file path (default: '-', meaning stdout; only works with calendars).
"""
self.outputName = filename
opt_o = opt_output
def opt_user(self, user):
"""
Add a user's calendar home (shorthand for '-r users:shortName').
"""
self.opt_record("users:" + user)
opt_u = opt_user
def opt_all(self):
"""
Export calendars from every calendar home in the database.
"""
self.exportAll = True
def opt_mailto(self):
"""
When exporting, convert calendar user addresses to mailto: form where possible.
"""
self.convertToMailto = True
def opt_calendars(self):
"""
Export calendars (the default)
"""
if len(self.exporters) > 0:
self.exporters[-1].exportType = "VEVENT"
else:
self.exportAllType = "VEVENT"
def opt_contacts(self):
"""
Export VCARDs
"""
if len(self.exporters) > 0:
self.exporters[-1].exportType = "VCARD"
else:
self.exportAllType = "VCARD"
def openOutput(self):
"""
Open the appropriate output file based on the '--output' option.
"""
if self.outputName == '-':
return sys.stdout
else:
return open(self.outputName, 'wb')
class _ExporterBase(object):
"""
Base exporter implementation that works from a home UID.
@ivar collections: A list of the names of collections that this exporter
should enumerate.
@type collections: C{list} of C{str}
"""
def __init__(self, exportType="VEVENT"):
self.collections = []
self.exportType = exportType
def getHomeUID(self, exportService):
"""
Subclasses must implement this.
"""
raise NotImplementedError()
@inlineCallbacks
def listCollections(self, txn, exportService):
"""
Enumerate all calendars based on the directory record and/or calendars
for this calendar home.
"""
uid = yield self.getHomeUID(exportService)
if self.exportType == "VEVENT":
home = yield txn.calendarHomeWithUID(uid, create=True)
else:
home = yield txn.addressbookHomeWithUID(uid, create=True)
result = []
if self.collections:
for collection in self.collections:
if self.exportType == "VEVENT":
collection = yield home.calendarWithName(collection)
else:
collection = yield home.addressbookWithName(collection)
result.append(collection)
else:
if self.exportType == "VEVENT":
for collection in (yield home.calendars()):
if collection.name() != 'inbox':
result.append(collection)
else:
for collection in (yield home.addressbooks()):
result.append(collection)
returnValue(result)
class UIDExporter(_ExporterBase):
"""
An exporter that constructs a list of calendars based on the UID of the
home, and an optional list of calendar names.
@ivar uid: The UID of a calendar home to export.
@type uid: C{str}
"""
def __init__(self, uid, exportType="VEVENT"):
super(UIDExporter, self).__init__(exportType=exportType)
self.uid = uid
def getHomeUID(self, exportService):
return succeed(self.uid)
class DirectoryExporter(_ExporterBase):
"""
An exporter that constructs a list of calendars based on the directory
services record ID of the home, and an optional list of calendar names.
@ivar recordType: The directory record type to export. For example:
'users'.
@type recordType: C{str}
@ivar shortName: The shortName of the directory record to export, according
to C{recordType}.
@type shortName: C{str}
"""
def __init__(self, recordType, shortName, exportType="VEVENT"):
super(DirectoryExporter, self).__init__(exportType=exportType)
self.recordType = recordType
self.shortName = shortName
@inlineCallbacks
def getHomeUID(self, exportService):
"""
Retrieve the home UID.
"""
directory = exportService.directoryService()
record = yield directory.recordWithShortName(
directory.oldNameToRecordType(self.recordType),
self.shortName
)
returnValue(record.uid)
@inlineCallbacks
def exportToFile(calendars, fileobj, convertToMailto=False):
"""
Export some calendars to a file as their owner would see them.
@param calendars: an iterable of L{ICalendar} providers (or L{Deferred}s of
same).
@param fileobj: an object with a C{write} method that will accept some
iCalendar data.
@return: a L{Deferred} which fires when the export is complete. (Note that
the file will not be closed.)
@rtype: L{Deferred} that fires with C{None}
"""
comp = Component.newCalendar()
for calendar in calendars:
calendar = yield calendar
for obj in (yield calendar.calendarObjects()):
evt = yield obj.filteredComponent(
calendar.ownerCalendarHome().uid(), True
)
for sub in evt.subcomponents():
if sub.name() != 'VTIMEZONE':
# Omit all VTIMEZONE components here - we will include them later
# when we serialize the whole calendar.
if convertToMailto:
convertCUAsToMailto(sub)
comp.addComponent(sub)
fileobj.write(comp.getTextWithTimezones(True))
@inlineCallbacks
def exportToDirectory(collections, dirname, convertToMailto=False):
"""
    Export some calendar and addressbook collections to a directory, as their
    owners would see them.
    @param collections: an iterable of L{ICalendar} or addressbook providers
        (or L{Deferred}s of same).
    @param dirname: the path to a directory to store the exported files in;
        each calendar collection gets its own .ics file and each addressbook
        its own .vcf file.
    @return: a L{Deferred} which fires when the export is complete.
@rtype: L{Deferred} that fires with C{None}
"""
for collection in collections:
if isinstance(collection, Calendar):
homeUID = collection.ownerCalendarHome().uid()
calendarProperties = collection.properties()
comp = Component.newCalendar()
for element, propertyName in (
(davxml.DisplayName, "NAME"),
(customxml.CalendarColor, "COLOR"),
):
value = calendarProperties.get(PropertyName.fromElement(element), None)
if value:
comp.addProperty(Property(propertyName, str(value)))
source = "/calendars/__uids__/{}/{}/".format(homeUID, collection.name())
comp.addProperty(Property("SOURCE", source))
for obj in (yield collection.calendarObjects()):
evt = yield obj.filteredComponent(homeUID, True)
for sub in evt.subcomponents():
if sub.name() != 'VTIMEZONE':
# Omit all VTIMEZONE components here - we will include them later
# when we serialize the whole calendar.
if convertToMailto:
convertCUAsToMailto(sub)
comp.addComponent(sub)
filename = os.path.join(dirname, "{}_{}.ics".format(homeUID, collection.name()))
with open(filename, 'wb') as fileobj:
fileobj.write(comp.getTextWithTimezones(True))
else: # addressbook
homeUID = collection.ownerAddressBookHome().uid()
filename = os.path.join(dirname, "{}_{}.vcf".format(homeUID, collection.name()))
with open(filename, 'wb') as fileobj:
for obj in (yield collection.addressbookObjects()):
vcard = yield obj.component()
fileobj.write(vcard.getText())
def convertCUAsToMailto(comp):
"""
Replace non-mailto: CUAs with mailto: CUAs where possible (i.e. there is an
EMAIL parameter value attached)
"""
    for attendeeProp in itertools.chain(comp.getAllAttendeeProperties(), [comp.getOrganizerProperty()]):
        if attendeeProp is None:  # component may have no ORGANIZER
            continue
        cuaddr = normalizeCUAddr(attendeeProp.value())
if not cuaddr.startswith("mailto:"):
emailAddress = attendeeProp.parameterValue("EMAIL", None)
if emailAddress:
attendeeProp.setValue("mailto:%s" % (emailAddress,))
attendeeProp.removeParameter("EMAIL")
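# Effect sketch (hypothetical property values): an attendee such as
#   ATTENDEE;EMAIL=user1@example.com:urn:x-uid:some-guid
# is rewritten to
#   ATTENDEE:mailto:user1@example.com
# with the now-redundant EMAIL parameter removed.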
class ExporterService(WorkerService, object):
"""
Service which runs, exports the appropriate records, then stops the reactor.
"""
def __init__(self, store, options, output, reactor, config):
super(ExporterService, self).__init__(store)
self.options = options
self.output = output
self.reactor = reactor
self.config = config
self._directory = self.store.directoryService()
@inlineCallbacks
def doWork(self):
"""
Do the export, stopping the reactor when done.
"""
txn = self.store.newTransaction()
if self.options.exportAll:
if self.options.exportAllType == "VEVENT":
homeTable = schema.CALENDAR_HOME
else:
homeTable = schema.ADDRESSBOOK_HOME
rows = (yield Select(
[homeTable.OWNER_UID, ],
From=homeTable,
).on(txn))
for uid in [row[0] for row in rows]:
self.options.exporters.append(UIDExporter(uid, exportType=self.options.exportAllType))
try:
allCollections = itertools.chain(
*[(yield exporter.listCollections(txn, self)) for exporter in
self.options.exporters]
)
if self.options.outputDirectoryName:
dirname = self.options.outputDirectoryName
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.mkdir(dirname)
yield exportToDirectory(allCollections, dirname, self.options.convertToMailto)
            else:
                yield exportToFile(allCollections, self.output, self.options.convertToMailto)
                self.output.close()
yield txn.commit()
# TODO: should be read-only, so commit/abort shouldn't make a
# difference. commit() for now, in case any transparent cache /
# update stuff needed to happen, don't want to undo it.
except:
log.failure("doWork()")
def directoryService(self):
"""
Get an appropriate directory service.
"""
return self._directory
def stopService(self):
"""
Stop the service. Nothing to do; everything should be finished by this
time.
"""
# TODO: stopping this service mid-export should really stop the export
# loop, but this is not implemented because nothing will actually do it
# except hitting ^C (which also calls reactor.stop(), so that will exit
# anyway).
def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
"""
Do the export.
"""
if reactor is None:
from twisted.internet import reactor
options = ExportOptions()
try:
options.parseOptions(argv[1:])
    except UsageError as e:
usage(e)
if options.outputDirectoryName:
output = None
else:
try:
output = options.openOutput()
        except IOError as e:
stderr.write(
"Unable to open output file for writing: %s\n" % (e)
)
sys.exit(1)
def makeService(store):
from twistedcaldav.config import config
config.TransactionTimeoutSeconds = 0
return ExporterService(store, options, output, reactor, config)
utilityMain(options["config"], makeService, reactor, verbose=options["debug"])
|
|
import warnings
import numpy as np
__all__ = ["timtraceline", "timtracelines"]
_future_warning_metadata = (
"In a future version traces will be returned as a dictionary containing "
"metadata together with the trace. To already get the new behavior use "
"metadata=True."
)
def timtraceline(ml, xstart, ystart, zstart, hstepmax, vstepfrac=0.2, tmax=1e12,
nstepmax=100, win=[-1e30, 1e30, -1e30, 1e30], silent=False,
returnlayers=False, *, metadata=False):
verbose = False # used for debugging
if not metadata:
warnings.warn(_future_warning_metadata, FutureWarning, stacklevel=2)
# treating aquifer layers and leaky layers the same way
xw1, xw2, yw1, yw2 = win
terminate = False
message = "no message"
eps = 1e-10 # used to place point just above or below aquifer top or bottom
direction = np.sign(hstepmax) # negative means backwards
hstepmax = np.abs(hstepmax)
aq = ml.aq.find_aquifer_data(xstart, ystart)
if zstart > aq.z[0] or zstart < aq.z[-1]:
terminate = True
message = "starting z value not inside aquifer"
layer, ltype, modellayer = aq.findlayer(zstart)
    # slightly alter starting location so as not to get stuck in surprising points
# starting at time 0
xyzt = [np.array([xstart * (1 + eps), ystart * (1 + eps), zstart, 0])]
layerlist = [] # to keep track of layers for plotting with colors
for _ in range(nstepmax):
if terminate:
break
x0, y0, z0, t0 = xyzt[-1]
aq = ml.aq.find_aquifer_data(x0, y0) # find new aquifer
layer, ltype, modellayer = aq.findlayer(z0)
layerlist.append(modellayer)
v0 = ml.velocomp(x0, y0, z0, aq, [layer, ltype]) * direction
if verbose:
print('xyz, layer', x0, y0, z0, layer)
print('v0, layer, ltype', v0, layer, ltype)
vx, vy, vz = v0
if ltype == "l": # in leaky layer
if vz > 0: # upward through leaky layer
if modellayer == 0: # steps out of the top
z1 = aq.z[modellayer]
terminate = True
else:
modellayer -= 1
# just above new bottom
z1 = aq.z[modellayer + 1] + eps * aq.Hlayer[modellayer]
elif vz < 0:
if modellayer == aq.nlayers - 1: # steps out of bottom
z1 = aq.z[modellayer + 1]
terminate = True
else:
modellayer += 1
# just below new top
z1 = aq.z[modellayer] - eps * aq.Hlayer[modellayer]
else:
message = "at point of zero leakage in leaky layer"
terminate = True
break
t1 = t0 + abs((z1 - z0) / vz)
xyztnew = [np.array([x0, y0, z1, t1])]
else: # in aquifer layer
vh = np.sqrt(vx ** 2 + vy ** 2)
if vz > 0: # flows upward
if aq.z[modellayer] - z0 < vstepfrac * aq.Haq[layer]:
# just below top
z1 = aq.z[modellayer] - eps * aq.Hlayer[modellayer]
else:
z1 = z0 + vstepfrac * aq.Haq[layer]
tvstep = (z1 - z0) / vz
elif vz < 0:
if z0 - aq.z[modellayer + 1] < vstepfrac * aq.Haq[layer]:
# just above bot
z1 = aq.z[modellayer + 1] + eps * aq.Hlayer[modellayer]
else:
z1 = z0 - vstepfrac * aq.Haq[layer]
tvstep = (z0 - z1) / abs(vz)
else: # vz=0
tvstep = np.inf
z1 = z0
if tvstep == np.inf and vh == 0: # this should never happen anymore
message = "at point of zero velocity"
terminate = True
break
if vh * tvstep > hstepmax:
                # max horizontal step smaller than max vertical step
thstep = hstepmax / vh
z1 = z0 + thstep * vz
else:
thstep = tvstep
# z1 is already computed
x1 = x0 + thstep * vx
y1 = y0 + thstep * vy
t1 = t0 + thstep
xyzt1 = np.array([x1, y1, z1, t1])
# check if point needs to be changed
correction = True
for e in aq.elementlist:
changed, terminate, xyztnew, changemessage = e.changetrace(
xyzt[-1], xyzt1, aq, layer, ltype, modellayer,
direction, hstepmax)
if changed or terminate:
correction = False
if changemessage:
message = changemessage
break
if correction: # correction step
vx, vy, vz = 0.5 * (v0 + direction * \
ml.velocomp(x1, y1, z1, aq, [layer, ltype]))
if verbose:
print('xyz1, layer', x1, y1, z1, layer)
print('correction vx, vy, vz', vx, vy, vz)
vh = np.sqrt(vx ** 2 + vy ** 2)
if vz > 0: # flows upward
tvstep = min(aq.z[modellayer] - z0,
vstepfrac * aq.Haq[layer]) / vz
elif vz < 0:
tvstep = min(z0 - aq.z[modellayer + 1],
vstepfrac * aq.Haq[layer]) / abs(vz)
else: # vz=0
tvstep = np.inf
if vh * tvstep > hstepmax:
                    # max horizontal step smaller than vertical step
thstep = hstepmax / vh
x1 = x0 + thstep * vx
y1 = y0 + thstep * vy
z1 = z0 + thstep * vz
else:
thstep = tvstep
x1 = x0 + thstep * vx
y1 = y0 + thstep * vy
if vz > 0: # flows upward
if aq.z[modellayer] - z0 < vstepfrac * aq.Haq[layer]:
if modellayer == 0: # steps out of the top
z1 = aq.z[modellayer]
terminate = True
message = "flowed out of top"
else:
modellayer -= 1
# just above new bottom
z1 = aq.z[modellayer + 1] + \
eps * aq.Hlayer[modellayer]
else:
z1 = z0 + thstep * vz
else:
if z0 - aq.z[modellayer + 1] < vstepfrac * aq.Haq[layer]:
if modellayer == aq.nlayers - 1: # steps out of bottom
z1 = aq.z[modellayer + 1]
terminate = True
message = "flowed out of bottom"
else:
modellayer += 1
# just below new top
z1 = aq.z[modellayer] - \
eps * aq.Hlayer[modellayer]
else:
z1 = z0 + thstep * vz
if not terminate:
layer = aq.layernumber[modellayer]
ltype = aq.ltype[modellayer]
t1 = t0 + thstep
xyztnew = [np.array([x1, y1, z1, t1])]
# check again if point needs to be changed
for e in aq.elementlist:
changed, terminate, xyztchanged, changemessage = \
e.changetrace(xyzt[-1], xyztnew[0], aq, layer, ltype,
modellayer, direction, hstepmax)
if changed or terminate:
xyztnew = xyztchanged
if changemessage:
message = changemessage
break
# check if outside window
x1, y1, z1, t1 = xyztnew[0]
frac = -1 # used to check later whether something changed
if x1 < xw1:
frac = abs((x0 - xw1) / (x1 - x0))
x1, y1, z1, t1 = xyzt[-1] + frac * (xyztnew[0] - xyzt[-1])
message = "reached window boundary"
if x1 > xw2:
frac = abs((x0 - xw2) / (x1 - x0))
x1, y1, z1, t1 = xyzt[-1] + frac * (xyztnew[0] - xyzt[-1])
message = "reached window boundary"
if y1 < yw1:
frac = abs((y0 - yw1) / (y1 - y0))
x1, y1, z1, t1 = xyzt[-1] + frac * (xyztnew[0] - xyzt[-1])
message = "reached window boundary"
if y1 > yw2:
frac = abs((y0 - yw2) / (y1 - y0))
x1, y1, z1, t1 = xyzt[-1] + frac * (xyztnew[0] - xyzt[-1])
message = "reached window boundary"
if t1 > tmax:
frac = abs((tmax - t0) / (t1 - t0))
x1, y1, z1, t1 = xyzt[-1] + frac * (xyztnew[0] - xyzt[-1])
message = "reached tmax"
if frac > 0: # at least one of the above 5 ifs was true
terminate = True
xyztnew = [np.array([x1, y1, z1, t1])]
xyzt.extend(xyztnew)
if len(xyztnew) == 2:
layerlist.append(modellayer)
elif len(xyztnew) > 3:
print("len(xyztnew > 3 !")
print(xyztnew)
else:
message = "reached nstepmax iterations"
if not silent:
print(message)
if metadata:
result = {"trace": np.array(xyzt), "message": message,
"complete": terminate}
if returnlayers:
result["layers"] = layerlist
else:
if returnlayers:
result = np.array(xyzt), layerlist
else:
result = np.array(xyzt)
return result
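# Illustrative usage sketch (hypothetical model setup; assumes timml's
# ModelMaq and Well API is available). Wrapped in a function so nothing
# runs at import time.
def _example_timtraceline():
    import timml  # assumed to be installed alongside this module
    ml = timml.ModelMaq(kaq=[10, 20], z=[20, 12, 10, 0], c=[100])
    timml.Well(ml, xw=100, yw=0, Qw=500)
    ml.solve()
    result = timtraceline(ml, xstart=0.0, ystart=0.0, zstart=15.0,
                          hstepmax=10.0, metadata=True)
    return result["trace"], result["message"], result["complete"]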
def timtracelines(ml, xstart, ystart, zstart, hstepmax, vstepfrac=0.2,
tmax=1e12, nstepmax=100, silent=".",
win=[-1e30, 1e30, -1e30, 1e30], *, metadata=False):
xyztlist = []
for x, y, z in zip(xstart, ystart, zstart):
xyztlist.append(
timtraceline(ml, x, y, z, hstepmax=hstepmax, vstepfrac=vstepfrac,
tmax=tmax, nstepmax=nstepmax, silent=silent, win=win,
metadata=metadata)
)
if silent == ".":
print(".", end="", flush=True)
if silent == ".":
print("")
return xyztlist
def crossline(xa, ya, xb, yb, z1, z2):
    """Check whether the segment from (xa, ya) to (xb, yb) crosses the
    segment between complex coordinates z1 and z2. Returns (True, x, y)
    with the crossing point nudged slightly past the line, or False when
    there is no crossing."""
    eps = 1e-8
za = xa + ya * 1j
zb = xb + yb * 1j
Za = (2 * za - (z1 + z2)) / (z2 - z1)
Zb = (2 * zb - (z1 + z2)) / (z2 - z1)
if Za.imag * Zb.imag < 0:
Xa, Ya = Za.real, Za.imag
Xb, Yb = Zb.real, Zb.imag
X = Xa - Ya * (Xb - Xa) / (Yb - Ya)
if abs(X) <= 1:
Z = X + eps * np.sign(Yb) * 1j
z = 0.5 * ((z2 - z1) * Z + z1 + z2)
return True, z.real, z.imag
return False
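# Quick illustration: the vertical segment (0, -1) -> (0, 1) crosses the
# segment from z1 = -1+0j to z2 = 1+0j at the origin, nudged slightly toward
# the end point:
def _example_crossline():
    return crossline(0.0, -1.0, 0.0, 1.0, -1 + 0j, 1 + 0j)  # (True, 0.0, ~1e-8)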
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import platform
import sys
import unittest
import StringIO
from mock.mock import patch, MagicMock
from only_for_platform import os_distro_value
from ambari_commons import os_utils
from urllib2 import HTTPError
import shutil
# Mock classes for reading from a file
class MagicFile(object):
def __init__(self, data):
self.data = data
def read(self):
return self.data
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
pass
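# Example: hand a MagicFile to a mocked open() so code under test that does
# "with open(path) as f: f.read()" sees canned data (this is the pattern used
# in the tests below):
#
#   open_mock.side_effect = [MagicFile('file contents')]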
project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../"))
shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties")
# We have to use this import HACK because the filename contains a dash
_search_file = os_utils.search_file
def search_file_proxy(filename, searchpath, pathsep=os.pathsep):
  global _search_file
  if "ambari.properties" in filename:
    return "/tmp/ambari.properties"
  return _search_file(filename, searchpath, pathsep)
os_utils.search_file = search_file_proxy
with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))):
with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
with patch("os.access", return_value = MagicMock(return_value=True)):
with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
with patch("platform.linux_distribution", return_value = os_distro_value):
with patch("os.symlink"):
with patch("glob.glob", return_value = ['/etc/init.d/postgresql-9.3']):
_ambari_server_ = __import__('ambari-server')
with patch("__builtin__.open"):
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_server.properties import Properties
from ambari_server.setupSso import setup_sso, AMBARI_SSO_AUTH_ENABLED, \
SSO_PROVIDER_URL, SSO_CERTIFICATE, JWT_COOKIE_NAME, JWT_AUDIENCES, \
SSO_ENABLED_SERVICES, SSO_MANAGE_SERVICES
class TestSetupSso(unittest.TestCase):
@patch("ambari_server.setupSso.is_server_runing")
def test_sso_setup_should_fail_if_server_is_not_running(self, is_server_runing_mock):
out = StringIO.StringIO()
sys.stdout = out
is_server_runing_mock.return_value = (False, 0)
options = self._create_empty_options_mock()
try:
setup_sso(options)
self.fail("Should fail with non-fatal exception")
except FatalException as e:
self.assertTrue("Ambari Server is not running" in e.reason)
pass
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
def test_silent_mode_is_not_allowed(self, is_server_runing_mock, get_silent_mock):
out = StringIO.StringIO()
sys.stdout = out
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = True
options = self._create_empty_options_mock()
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except NonFatalException as e:
self.assertTrue("setup-sso is not enabled in silent mode." in e.reason)
pass
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
def test_invalid_sso_enabled_cli_option_should_result_in_error(self, is_server_runing_mock, get_silent_mock):
out = StringIO.StringIO()
sys.stdout = out
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
options = self._create_empty_options_mock()
options.sso_enabled = 'not_true_or_false'
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except FatalException as e:
self.assertTrue("--sso-enabled should be to either 'true' or 'false'" in e.reason)
pass
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
def test_missing_sso_provider_url_cli_option_when_enabling_sso_should_result_in_error(self, is_server_runing_mock, get_silent_mock):
out = StringIO.StringIO()
sys.stdout = out
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
options = self._create_empty_options_mock()
options.sso_enabled = 'true'
options.sso_provider_url = ''
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except FatalException as e:
self.assertTrue("Missing option: --sso-provider-url" in e.reason)
pass
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
def test_missing_sso_public_cert_file_cli_option_when_enabling_sso_should_result_in_error(self, is_server_runing_mock, get_silent_mock):
out = StringIO.StringIO()
sys.stdout = out
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
options = self._create_empty_options_mock()
options.sso_enabled = 'true'
options.sso_public_cert_file = ''
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except FatalException as e:
self.assertTrue("Missing option: --sso-public-cert-file" in e.reason)
pass
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
def test_invalid_sso_provider_url_cli_option_when_enabling_sso_should_result_in_error(self, is_server_runing_mock, get_silent_mock):
out = StringIO.StringIO()
sys.stdout = out
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
options = self._create_empty_options_mock()
options.sso_enabled = 'true'
options.sso_provider_url = '!invalidHost:invalidPort'
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except FatalException as e:
self.assertTrue("Invalid --sso-provider-url" in e.reason)
pass
options.sso_provider_url = 'The SSO provider URL is https://c7402.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso'
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except FatalException as e:
self.assertTrue("Invalid --sso-provider-url" in e.reason)
pass
options.sso_provider_url = 'https://c7402.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso is the SSO provider URL'
try:
setup_sso(options)
self.fail("Should fail with fatal exception")
except FatalException as e:
self.assertTrue("Invalid --sso-provider-url" in e.reason)
pass
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.perform_changes_via_rest_api")
@patch("ambari_server.setupSso.get_ambari_properties")
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
@patch("ambari_server.setupSso.get_json_via_rest_api")
@patch('__builtin__.open')
def test_all_cli_options_are_collected_when_enabling_sso(self, open_mock,
get_json_via_rest_api_mock,
is_server_runing_mock,
get_silent_mock,
get_ambari_properties_mock,
perform_changes_via_rest_api_mock):
out = StringIO.StringIO()
sys.stdout = out
certificate_data = '-----BEGIN CERTIFICATE-----\n' \
'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \
'................................................................\n' \
'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \
'-----END CERTIFICATE-----'
mock_file = MagicFile(certificate_data)
open_mock.side_effect = [mock_file]
get_json_via_rest_api_mock.return_value = (200, {})
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
properties = Properties()
get_ambari_properties_mock.return_value = properties
sso_enabled = 'true'
sso_enabled_services = 'Ambari, SERVICE1, SERVICE2'
sso_provider_url = 'https://c7402.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso'
sso_public_cert_file = '/test/file/path'
sso_jwt_cookie_name = 'test_cookie'
sso_jwt_audience_list = 'test, audience, list'
options = self._create_empty_options_mock()
options.sso_enabled = sso_enabled
options.sso_enabled_ambari = 'true'
options.sso_manage_services = 'true'
options.sso_provider_url = sso_provider_url
options.sso_public_cert_file = sso_public_cert_file
options.sso_jwt_cookie_name = sso_jwt_cookie_name
options.sso_jwt_audience_list = sso_jwt_audience_list
options.sso_enabled_services = sso_enabled_services
setup_sso(options)
self.assertTrue(perform_changes_via_rest_api_mock.called)
requestCall = perform_changes_via_rest_api_mock.call_args_list[0]
args, kwargs = requestCall
requestData = args[5]
self.assertTrue(isinstance(requestData, dict))
ssoProperties = requestData['Configuration']['properties']
self.assertEqual(ssoProperties[AMBARI_SSO_AUTH_ENABLED], sso_enabled)
self.assertEqual(ssoProperties[SSO_PROVIDER_URL], sso_provider_url)
self.assertEqual(ssoProperties[SSO_CERTIFICATE], certificate_data)
self.assertEqual(ssoProperties[JWT_COOKIE_NAME], sso_jwt_cookie_name)
self.assertEqual(ssoProperties[JWT_AUDIENCES], sso_jwt_audience_list)
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.perform_changes_via_rest_api")
@patch("ambari_server.setupSso.get_ambari_properties")
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
@patch("ambari_server.setupSso.get_json_via_rest_api")
@patch('__builtin__.open')
def test_only_sso_enabled_cli_option_is_collected_when_disabling_sso(self, open_mock,
get_json_via_rest_api_mock,
is_server_runing_mock,
get_silent_mock,
get_ambari_properties_mock,
perform_changes_via_rest_api_mock):
out = StringIO.StringIO()
sys.stdout = out
certificate_data = '-----BEGIN CERTIFICATE-----\n' \
'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \
'................................................................\n' \
'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \
'-----END CERTIFICATE-----'
mock_file = MagicFile(certificate_data)
open_mock.side_effect = [mock_file]
get_json_via_rest_api_mock.return_value = (200, {})
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
properties = Properties()
get_ambari_properties_mock.return_value = properties
sso_enabled = 'false'
sso_provider_url = 'http://testHost:8080'
sso_public_cert_file = '/test/file/path'
sso_jwt_cookie_name = 'test_cookie'
sso_jwt_audience_list = 'test, audience, list'
options = self._create_empty_options_mock()
options.sso_enabled = sso_enabled
options.sso_provider_url = sso_provider_url
options.sso_public_cert_file = sso_public_cert_file
options.sso_jwt_cookie_name = sso_jwt_cookie_name
options.sso_jwt_audience_list = sso_jwt_audience_list
setup_sso(options)
self.assertTrue(perform_changes_via_rest_api_mock.called)
requestCall = perform_changes_via_rest_api_mock.call_args_list[0]
args, kwargs = requestCall
requestMethod = args[4]
self.assertTrue(isinstance(requestMethod, str))
self.assertEqual(requestMethod, "DELETE")
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.perform_changes_via_rest_api")
@patch("ambari_server.setupSso.get_YN_input")
@patch("ambari_server.setupSso.get_ambari_properties")
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
@patch("ambari_server.setupSso.get_json_via_rest_api")
@patch('__builtin__.open')
def test_sso_is_enabled_for_all_services_via_user_input(self, open_mock,
get_json_via_rest_api_mock,
is_server_runing_mock,
get_silent_mock,
get_ambari_properties_mock,
get_YN_input_mock,
perform_changes_via_rest_api_mock):
out = StringIO.StringIO()
sys.stdout = out
certificate_data = '-----BEGIN CERTIFICATE-----\n' \
'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \
'................................................................\n' \
'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \
'-----END CERTIFICATE-----'
mock_file = MagicFile(certificate_data)
open_mock.side_effect = [mock_file]
get_json_via_rest_api_mock.return_value = (200, {})
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
get_ambari_properties_mock.return_value = Properties()
    def yn_input_side_effect(*args, **kwargs):
      if 'Manage SSO configurations' in args[0]:
        return True
      elif 'all services' in args[0]:
        return True
      else:
        raise Exception("ShouldNotBeInvoked") # only the 'Manage SSO configurations' and 'all services' questions should be asked
get_YN_input_mock.side_effect = yn_input_side_effect
sso_enabled = 'true'
sso_provider_url = 'http://testHost:8080'
sso_public_cert_file = '/test/file/path'
sso_jwt_cookie_name = 'test_cookie'
sso_jwt_audience_list = 'test, audience, list'
options = self._create_empty_options_mock()
options.sso_enabled = sso_enabled
options.sso_enabled_ambari = 'true'
options.sso_manage_services = 'true'
options.sso_provider_url = sso_provider_url
options.sso_public_cert_file = sso_public_cert_file
options.sso_jwt_cookie_name = sso_jwt_cookie_name
options.sso_jwt_audience_list = sso_jwt_audience_list
setup_sso(options)
self.assertTrue(perform_changes_via_rest_api_mock.called)
requestCall = perform_changes_via_rest_api_mock.call_args_list[0]
args, kwargs = requestCall
requestData = args[5]
self.assertTrue(isinstance(requestData, dict))
ssoProperties = requestData['Configuration']['properties']
self.assertEqual(ssoProperties[AMBARI_SSO_AUTH_ENABLED], sso_enabled)
self.assertEqual(ssoProperties[SSO_PROVIDER_URL], sso_provider_url)
self.assertEqual(ssoProperties[SSO_CERTIFICATE], certificate_data)
self.assertEqual(ssoProperties[JWT_COOKIE_NAME], sso_jwt_cookie_name)
self.assertEqual(ssoProperties[JWT_AUDIENCES], sso_jwt_audience_list)
self.assertEqual(ssoProperties[SSO_MANAGE_SERVICES], "true")
self.assertEqual(ssoProperties[SSO_ENABLED_SERVICES], "*")
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.setupSso.perform_changes_via_rest_api")
@patch("urllib2.urlopen")
@patch("ambari_server.setupSso.get_cluster_name")
@patch("ambari_server.setupSso.get_YN_input")
@patch("ambari_server.setupSso.get_ambari_properties")
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
@patch('__builtin__.open')
def test_setup_sso_should_not_fail_when_sso_config_cannot_be_loaded_due_to_404_error(self, open_mock,
is_server_runing_mock,
get_silent_mock,
get_ambari_properties_mock,
get_YN_input_mock,
get_cluster_name_mock,
urlopen_mock,
perform_changes_via_rest_api_mock):
out = StringIO.StringIO()
sys.stdout = out
certificate_data = '-----BEGIN CERTIFICATE-----\n' \
'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \
'................................................................\n' \
'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \
'-----END CERTIFICATE-----'
mock_file = MagicFile(certificate_data)
open_mock.side_effect = [mock_file]
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
get_ambari_properties_mock.return_value = Properties()
get_cluster_name_mock.return_value = 'cluster1'
    get_YN_input_mock.return_value = True
urlopen_mock.side_effect = HTTPError(MagicMock(status=404), 404, 'not found', None, None)
sso_enabled = 'true'
sso_provider_url = 'http://testHost:8080'
sso_public_cert_file = '/test/file/path'
sso_jwt_cookie_name = 'test_cookie'
sso_jwt_audience_list = 'test, audience, list'
options = self._create_empty_options_mock()
options.sso_enabled = sso_enabled
options.sso_enabled_ambari = sso_enabled
options.sso_manage_services = 'true'
options.sso_provider_url = sso_provider_url
options.sso_public_cert_file = sso_public_cert_file
options.sso_jwt_cookie_name = sso_jwt_cookie_name
options.sso_jwt_audience_list = sso_jwt_audience_list
setup_sso(options)
self.assertTrue(perform_changes_via_rest_api_mock.called)
requestCall = perform_changes_via_rest_api_mock.call_args_list[0]
args, kwargs = requestCall
requestData = args[5]
self.assertTrue(isinstance(requestData, dict))
ssoProperties = requestData['Configuration']['properties']
self.assertEqual(ssoProperties[AMBARI_SSO_AUTH_ENABLED], sso_enabled)
self.assertEqual(ssoProperties[SSO_PROVIDER_URL], sso_provider_url)
self.assertEqual(ssoProperties[SSO_CERTIFICATE], certificate_data)
self.assertEqual(ssoProperties[JWT_COOKIE_NAME], sso_jwt_cookie_name)
self.assertEqual(ssoProperties[JWT_AUDIENCES], sso_jwt_audience_list)
self.assertEqual(ssoProperties[SSO_MANAGE_SERVICES], "true")
self.assertEqual(ssoProperties[SSO_ENABLED_SERVICES], "*")
@patch("urllib2.urlopen")
@patch("ambari_server.setupSso.perform_changes_via_rest_api")
@patch("ambari_server.setupSso.get_cluster_name")
@patch("ambari_server.setupSso.get_YN_input")
@patch("ambari_server.setupSso.get_ambari_properties")
@patch("ambari_server.setupSso.get_silent")
@patch("ambari_server.setupSso.is_server_runing")
@patch("ambari_server.setupSso.get_json_via_rest_api")
@patch('__builtin__.open')
def test_sso_enabled_services_are_collected_via_user_input(self, open_mock,
get_json_via_rest_api_mock,
is_server_runing_mock,
get_silent_mock,
get_ambari_properties_mock,
get_YN_input_mock,
get_cluster_name_mock,
perform_changes_via_rest_api_mock,
urlopen_mock):
out = StringIO.StringIO()
sys.stdout = out
certificate_data = '-----BEGIN CERTIFICATE-----\n' \
'MIIE3DCCA8SgAwIBAgIJAKfbOMmFyOlNMA0GCSqGSIb3DQEBBQUAMIGkMQswCQYD\n' \
'................................................................\n' \
'dXRpbmcxFzAVBgNVBAMTDmNsb3VkYnJlYWstcmdsMSUwIwYJKoZIhvcNAQkBFhZy\n' \
'-----END CERTIFICATE-----'
mock_file = MagicFile(certificate_data)
open_mock.side_effect = [mock_file]
eligible_services = \
"""
{
"href": "http://c7401:8080/api/v1/clusters/cluster1/services?ServiceInfo/sso_integration_supported=true",
"items": [
{
"href": "http://c7401:8080/api/v1/clusters/cluster1/services/HDFS",
"ServiceInfo": {
"cluster_name": "cluster1",
"service_name": "HDFS",
"sso_integration_supported": true,
"sso_integration_requires_kerberos": false,
"kerberos_enabled": false
}
},
{
"href": "http://c7401:8080/api/v1/clusters/cluster1/services/ZOOKEPER",
"ServiceInfo": {
"cluster_name": "cluster1",
"service_name": "ZOOKEPER",
"sso_integration_supported": true,
"sso_integration_requires_kerberos": false,
"kerberos_enabled": false
}
}
]
}
"""
eligible_services_json = {
"href": "http://c7401:8080/api/v1/clusters/cluster1/services?ServiceInfo/sso_integration_supported=true",
"items": [
{
"href": "http://c7401:8080/api/v1/clusters/cluster1/services/HDFS",
"ServiceInfo": {
"cluster_name": "cluster1",
"service_name": "HDFS",
"sso_integration_supported": True,
"sso_integration_requires_kerberos": False,
"kerberos_enabled": False
}
},
{
"href": "http://c7401:8080/api/v1/clusters/cluster1/services/ZOOKEPER",
"ServiceInfo": {
"cluster_name": "cluster1",
"service_name": "ZOOKEPER",
"sso_integration_supported": True,
"sso_integration_requires_kerberos": False,
"kerberos_enabled": False
}
}
]
}
    get_json_via_rest_api_mock.return_value = (200, eligible_services_json)
is_server_runing_mock.return_value = (True, 0)
get_silent_mock.return_value = False
get_ambari_properties_mock.return_value = Properties()
get_cluster_name_mock.return_value = 'cluster1'
def yn_input_side_effect(*args, **kwargs):
if 'all services' in args[0]:
return False
else:
return True
get_YN_input_mock.side_effect = yn_input_side_effect
response = MagicMock()
response.getcode.return_value = 200
response.read.return_value = eligible_services
urlopen_mock.return_value = response
options = self._create_empty_options_mock()
options.sso_enabled = 'true'
options.sso_enabled_ambari = 'true'
options.sso_manage_services = 'true'
options.sso_provider_url = 'http://testHost:8080'
options.sso_public_cert_file = '/test/file/path'
options.sso_jwt_cookie_name = 'test_cookie'
options.sso_jwt_audience_list = 'test, audience, list'
setup_sso(options)
self.assertTrue(perform_changes_via_rest_api_mock.called)
requestCall = perform_changes_via_rest_api_mock.call_args_list[0]
args, kwargs = requestCall
requestData = args[5]
self.assertTrue(isinstance(requestData, dict))
ssoProperties = requestData['Configuration']['properties']
self.assertEqual(ssoProperties[SSO_MANAGE_SERVICES], "true")
self.assertEqual(ssoProperties[SSO_ENABLED_SERVICES], "HDFS,ZOOKEPER")
sys.stdout = sys.__stdout__
pass
def _create_empty_options_mock(self):
options = MagicMock()
options.sso_enabled = None
options.sso_enabled_ambari = None
options.sso_manage_services = None
options.sso_enabled_services = None
options.sso_provider_url = None
options.sso_public_cert_file = None
options.sso_jwt_cookie_name = None
options.sso_jwt_audience_list = None
return options
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from binascii import hexlify
from enum import IntEnum
import io
import logging
import struct
from network_data import SubTlvsFactory
from tlvs_parsing import UnknownTlvFactory
import common
class TlvType(IntEnum):
CHANNEL = 0
PAN_ID = 1
EXTENDED_PANID = 2
NETWORK_NAME = 3
PSKC = 4
NETWORK_KEY = 5
NETWORK_KEY_SEQUENCE_COUNTER = 6
NETWORK_MESH_LOCAL_PREFIX = 7
STEERING_DATA = 8
BORDER_AGENT_LOCATOR = 9
COMMISSIONER_ID = 10
COMMISSIONER_SESSION_ID = 11
SECURITY_POLICY = 12
GET = 13
ACTIVE_TIMESTAMP = 14
COMMISSIONER_UDP_PORT = 15
STATE = 16
JOINER_DTLS_ENCAPSULATION = 17
JOINER_UDP_PORT = 18
JOINER_IID = 19
JOINER_ROUTER_LOCATOR = 20
JOINER_ROUTER_KEK = 21
PROVISIONING_URL = 32
VENDOR_NAME = 33
VENDOR_MODEL = 34
VENDOR_SW_VERSION = 35
VENDOR_DATA = 36
VENDOR_STACK_VERSION = 37
UDP_ENCAPSULATION = 48
IPV6_ADDRESS = 49
PENDING_TIMESTAMP = 51
DELAY_TIMER = 52
CHANNEL_MASK = 53
COUNT = 54
PERIOD = 55
SCAN_DURATION = 56
ENERGY_LIST = 57
CSL_SYNCHRONIZED_TIMEOUT = 85
CSL_CLOCK_ACCURACY = 86
DISCOVERY_REQUEST = 128
DISCOVERY_RESPONSE = 129
class MeshCopState(IntEnum):
ACCEPT = 0x1
REJECT = 0xff
class MeshCopMessageType(IntEnum):
    JOIN_FIN_REQ = 1
    JOIN_FIN_RSP = 2
    JOIN_ENT_NTF = 3
    JOIN_ENT_RSP = 4
def create_mesh_cop_message_type_set():
return [
MeshCopMessageType.JOIN_FIN_REQ,
MeshCopMessageType.JOIN_FIN_RSP,
MeshCopMessageType.JOIN_ENT_NTF,
MeshCopMessageType.JOIN_ENT_RSP,
]
# Channel TLV (0)
class Channel(object):
def __init__(self, channel_page, channel):
self._channel_page = channel_page
self._channel = channel
@property
def channel_page(self):
return self._channel_page
@property
def channel(self):
return self._channel
def __eq__(self, other):
common.expect_the_same_class(self, other)
        return (self._channel_page == other._channel_page and self._channel == other._channel)
def __repr__(self):
return 'Channel(channel_page={},channel={})'.format(self._channel_page, self._channel)
def to_hex(self):
return struct.pack('>BBBH', TlvType.CHANNEL, 3, self.channel_page, self.channel)
class ChannelFactory(object):
def parse(self, data, message_info):
data_tp = struct.unpack('>BH', data.read(3))
channel_page = data_tp[0]
channel = data_tp[1]
return Channel(channel_page, channel)
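# Round-trip sketch: Channel.to_hex() emits the type/length header plus the
# 3-byte value, while ChannelFactory.parse() expects only the value bytes,
# so the two-byte header is stripped first.
def _example_channel_roundtrip():
    tlv = Channel(channel_page=0, channel=11)
    value = tlv.to_hex()[2:]  # drop the type and length bytes
    return ChannelFactory().parse(io.BytesIO(value), None)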
# PanId TLV (1)
class Panid(object):
# TODO: Not implemented yet
pass
class PanidFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ExtendedPanid TLV (2)
class ExtendedPanid(object):
def __init__(self, extended_panid):
self._extended_panid = extended_panid
@property
def extended_panid(self):
return self._extended_panid
def __eq__(self, other):
return (isinstance(self, type(other)) and self.extended_panid == other.extended_panid)
def __repr__(self):
return "ExtendedPanid(extended_panid={})".format(self.extended_panid)
class ExtendedPanidFactory(object):
def parse(self, data, message_info):
extended_panid = struct.unpack(">Q", data.read(8))[0]
return ExtendedPanid(extended_panid)
# NetworkName TLV (3)
class NetworkName(object):
def __init__(self, network_name):
self._network_name = network_name
@property
def network_name(self):
return self._network_name
def __eq__(self, other):
return (isinstance(self, type(other)) and self.network_name == other.network_name)
def __repr__(self):
return "NetworkName(network_name={})".format(self.network_name)
class NetworkNameFactory(object):
    def parse(self, data, message_info):
        length = message_info.length
        network_name = struct.unpack("{}s".format(length), data.read(length))[0]
        return NetworkName(network_name)
# PSKc TLV (4)
class PSKc(object):
# TODO: Not implemented yet
pass
class PSKcFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# NetworkKey TLV (5)
class NetworkKey(object):
# TODO: Not implemented yet
pass
class NetworkKeyFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# NetworkKeySequenceCounter TLV (6)
class NetworkKeySequenceCounter(object):
# TODO: Not implemented yet
pass
class NetworkKeySequenceCounterFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# NetworkMeshLocalPrefix TLV (7)
class NetworkMeshLocalPrefix(object):
# TODO: Not implemented yet
pass
class NetworkMeshLocalPrefixFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Steering Data TLV (8)
class SteeringData(object):
def __init__(self, bloom_filter):
self._bloom_filter = bloom_filter
@property
def bloom_filter(self):
return self._bloom_filter
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._bloom_filter == other._bloom_filter
def __repr__(self):
return "SteeringData(bloom_filter={})".format(hexlify(self._bloom_filter))
def to_hex(self):
bloom_filter_len = len(self.bloom_filter)
return (struct.pack('>BB', TlvType.STEERING_DATA, bloom_filter_len) + self.bloom_filter)
class SteeringDataFactory:
def parse(self, data, message_info):
bloom_filter = data.read(message_info.length)
return SteeringData(bloom_filter)
# Border Agent Locator TLV (9)
class BorderAgentLocator(object):
def __init__(self, address):
self._border_agent_locator = address
@property
def border_agent_locator(self):
return self._border_agent_locator
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._border_agent_locator == other._border_agent_locator
def __repr__(self):
return "BorderAgentLocator(rloc16={})".format(hex(self._border_agent_locator))
def to_hex(self):
return struct.pack('>BBH', TlvType.BORDER_AGENT_LOCATOR, 2, self.border_agent_locator)
class BorderAgentLocatorFactory:
def parse(self, data, message_info):
border_agent_locator = struct.unpack(">H", data.read(2))[0]
return BorderAgentLocator(border_agent_locator)
# CommissionerId TLV (10)
class CommissionerId(object):
def __init__(self, commissioner_id):
self._commissioner_id = commissioner_id
@property
def commissioner_id(self):
return self._commissioner_id
def __eq__(self, other):
return self.commissioner_id == other.commissioner_id
def __repr__(self):
return "CommissionerId(commissioner_id={})".format(self.commissioner_id)
class CommissionerIdFactory(object):
def parse(self, data, message_info):
commissioner_id = data.getvalue().decode('utf-8')
return CommissionerId(commissioner_id)
# Commissioner Session ID TLV (11)
class CommissionerSessionId(object):
def __init__(self, commissioner_session_id):
self._commissioner_session_id = commissioner_session_id
@property
def commissioner_session_id(self):
return self._commissioner_session_id
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._commissioner_session_id == other._commissioner_session_id
def __repr__(self):
return "CommissionerSessionId(commissioner_session_id={})".format(self._commissioner_session_id)
def to_hex(self):
return struct.pack(
'>BBH',
TlvType.COMMISSIONER_SESSION_ID,
2,
self.commissioner_session_id,
)
class CommissionerSessionIdFactory:
def parse(self, data, message_info):
session_id = struct.unpack(">H", data.read(2))[0]
return CommissionerSessionId(session_id)
# SecurityPolicy TLV (12)
class SecurityPolicy(object):
# TODO: Not implemented yet
pass
class SecurityPolicyFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Get TLV (13)
class Get(object):
# TODO: Not implemented yet
pass
class GetFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ActiveTimestamp TLV (14)
class ActiveTimestamp(object):
# TODO: Not implemented yet
pass
class ActiveTimestampFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Commissioner UDP Port TLV (15)
class CommissionerUdpPort(object):
def __init__(self, udp_port):
self._udp_port = udp_port
@property
def udp_port(self):
return self._udp_port
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._udp_port == other._udp_port
def __repr__(self):
return "CommissionerUdpPort(udp_port={})".format(self._udp_port)
class CommissionerUdpPortFactory:
def parse(self, data, message_info):
udp_port = struct.unpack(">H", data.read(2))[0]
return CommissionerUdpPort(udp_port)
# State TLV (16)
class State(object):
def __init__(self, state):
self._state = state
@property
def state(self):
return self._state
def __eq__(self, other):
return self.state == other.state
def __repr__(self):
return "State(state={})".format(self.state)
class StateFactory:
def parse(self, data, message_info):
state = ord(data.read(1))
return State(state)
# JoinerDtlsEncapsulation TLV (17)
class JoinerDtlsEncapsulation(object):
# TODO: Not implemented yet
pass
class JoinerDtlsEncapsulationFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# JoinerUdpPort TLV (18)
class JoinerUdpPort(object):
def __init__(self, udp_port):
self._udp_port = udp_port
@property
def udp_port(self):
return self._udp_port
def __eq__(self, other):
return (isinstance(self, type(other)) and self.udp_port == other.udp_port)
def __repr__(self):
return "JoinerUdpPort(udp_port={})".format(self.udp_port)
class JoinerUdpPortFactory(object):
def parse(self, data, message_info):
udp_port = struct.unpack(">H", data.read(2))[0]
return JoinerUdpPort(udp_port)
# JoinerIID TLV (19)
class JoinerIID(object):
# TODO: Not implemented yet
pass
class JoinerIIDFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# JoinerRouterLocator TLV (20)
class JoinerRouterLocator(object):
# TODO: Not implemented yet
pass
class JoinerRouterLocatorFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# JoinerRouterKEK TLV (21)
class JoinerRouterKEK(object):
# TODO: Not implemented yet
pass
class JoinerRouterKEKFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ProvisioningURL TLV (32)
class ProvisioningUrl(object):
def __init__(self, url):
self._url = url
@property
def url(self):
return self._url
def __repr__(self):
return "ProvisioningUrl(url={})".format(self.url)
class ProvisioningUrlFactory:
def parse(self, data, message_info):
url = data.getvalue().decode('utf-8')
return ProvisioningUrl(url)
# VendorName TLV (33)
class VendorName(object):
def __init__(self, vendor_name):
self._vendor_name = vendor_name
@property
def vendor_name(self):
return self._vendor_name
def __eq__(self, other):
return self.vendor_name == other.vendor_name
def __repr__(self):
return "VendorName(vendor_name={})".format(self.vendor_name)
class VendorNameFactory:
def parse(self, data, message_info):
vendor_name = data.getvalue().decode('utf-8')
return VendorName(vendor_name)
# VendorModel TLV (34)
class VendorModel(object):
def __init__(self, vendor_model):
self._vendor_model = vendor_model
@property
def vendor_model(self):
return self._vendor_model
def __eq__(self, other):
return self.vendor_model == other.vendor_model
def __repr__(self):
return "VendorModel(vendor_model={})".format(self.vendor_model)
class VendorModelFactory:
def parse(self, data, message_info):
vendor_model = data.getvalue().decode('utf-8')
return VendorModel(vendor_model)
# VendorSWVersion TLV (35)
class VendorSWVersion(object):
def __init__(self, vendor_sw_version):
self._vendor_sw_version = vendor_sw_version
@property
def vendor_sw_version(self):
return self._vendor_sw_version
def __eq__(self, other):
return self.vendor_sw_version == other.vendor_sw_version
def __repr__(self):
return "VendorName(vendor_sw_version={})".format(self.vendor_sw_version)
class VendorSWVersionFactory:
def parse(self, data, message_info):
vendor_sw_version = data.getvalue()
return VendorSWVersion(vendor_sw_version)
# VendorData TLV (36)
class VendorData(object):
def __init__(self, data):
self._vendor_data = data
@property
def vendor_data(self):
return self._vendor_data
def __repr__(self):
return "Vendor(url={})".format(self.vendor_data)
class VendorDataFactory(object):
def parse(self, data, message_info):
return VendorData(data)
# VendorStackVersion TLV (37)
class VendorStackVersion(object):
def __init__(self, stack_vendor_oui, build, rev, minor, major):
self._stack_vendor_oui = stack_vendor_oui
self._build = build
self._rev = rev
self._minor = minor
self._major = major
return
@property
def stack_vendor_oui(self):
return self._stack_vendor_oui
@property
def build(self):
return self._build
@property
def rev(self):
return self._rev
@property
def minor(self):
return self._minor
@property
def major(self):
return self._major
def __repr__(self):
return "VendorStackVersion(vendor_stack_version={}, build={}, rev={}, minor={}, major={})".format(
self.stack_vendor_oui, self.build, self.rev, self.minor, self.major)
class VendorStackVersionFactory:
def parse(self, data, message_info):
stack_vendor_oui = struct.unpack(">H", data.read(2))[0]
rest = struct.unpack(">BBBB", data.read(4))
        # 12-bit build followed by 4-bit rev, minor and major nibbles;
        # shift the high nibbles down so each field stands alone.
        build = rest[1] << 4 | (rest[2] & 0xF0) >> 4
        rev = rest[2] & 0xF
        minor = (rest[3] & 0xF0) >> 4
        major = rest[3] & 0xF
return VendorStackVersion(stack_vendor_oui, build, rev, minor, major)
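# Worked example for the nibble layout assumed above: value bytes ending in
# 0x12 0x34 0x56 decode to build=0x123, rev=0x4, minor=0x5, major=0x6.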
# UdpEncapsulation TLV (48)
class UdpEncapsulation(object):
# TODO: Not implemented yet
pass
class UdpEncapsulationFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Ipv6Address TLV (49)
class Ipv6Address(object):
# TODO: Not implemented yet
pass
class Ipv6AddressFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# PendingTimestamp TLV (51)
class PendingTimestamp(object):
# TODO: Not implemented yet
pass
class PendingTimestampFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# DelayTimer TLV (52)
class DelayTimer(object):
# TODO: Not implemented yet
pass
class DelayTimerFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ChannelMask TLV (53)
class ChannelMask(object):
# TODO: Not implemented yet
pass
class ChannelMaskFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Count TLV (54)
class Count(object):
# TODO: Not implemented yet
pass
class CountFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Period TLV (55)
class Period(object):
# TODO: Not implemented yet
pass
class PeriodFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ScanDuration TLV (56)
class ScanDuration(object):
# TODO: Not implemented yet
pass
class ScanDurationFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# EnergyList TLV (57)
class EnergyList(object):
# TODO: Not implemented yet
pass
class EnergyListFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Discovery Request TLV (128)
class DiscoveryRequest(object):
def __init__(self, version, joiner_flag):
self._version = version
self._joiner_flag = joiner_flag
@property
def version(self):
return self._version
@property
def joiner_flag(self):
return self._joiner_flag
def __eq__(self, other):
return (isinstance(self, type(other)) and self.version == other.version and
self.joiner_flag == other.joiner_flag)
def __repr__(self):
return "DiscoveryRequest(version={}, joiner_flag={})".format(self.version, self.joiner_flag)
class DiscoveryRequestFactory(object):
def parse(self, data, message_info):
data_byte = struct.unpack(">B", data.read(1))[0]
version = (data_byte & 0xf0) >> 4
joiner_flag = (data_byte & 0x08) >> 3
return DiscoveryRequest(version, joiner_flag)
# Discovery Response TLV (129)
class DiscoveryResponse(object):
def __init__(self, version, native_flag):
self._version = version
self._native_flag = native_flag
@property
def version(self):
return self._version
@property
def native_flag(self):
return self._native_flag
def __eq__(self, other):
return (isinstance(self, type(other)) and self.version == other.version and
self.native_flag == other.native_flag)
def __repr__(self):
return "DiscoveryResponse(version={}, native_flag={})".format(self.version, self.native_flag)
class DiscoveryResponseFactory(object):
def parse(self, data, message_info):
data_byte = struct.unpack(">B", data.read(1))[0]
version = (data_byte & 0xf0) >> 4
native_flag = (data_byte & 0x08) >> 3
return DiscoveryResponse(version, native_flag)
class MeshCopCommand(object):
def __init__(self, _type, tlvs):
self._type = _type
self._tlvs = tlvs
@property
def type(self):
return self._type
@property
def tlvs(self):
return self._tlvs
def __repr__(self):
tlvs_str = ", ".join(["{}".format(tlv) for tlv in self.tlvs])
return "MeshCopCommand(type={}, tlvs=[{}])".format(self.type, tlvs_str)
def create_default_mesh_cop_msg_type_map():
return {
'JOIN_FIN.req': MeshCopMessageType.JOIN_FIN_REQ,
'JOIN_FIN.rsp': MeshCopMessageType.JOIN_FIN_RSP,
'JOIN_ENT.ntf': MeshCopMessageType.JOIN_ENT_NTF,
'JOIN_ENT.rsp': MeshCopMessageType.JOIN_ENT_RSP,
}
class MeshCopCommandFactory:
def __init__(self, tlvs_factories):
self._tlvs_factories = tlvs_factories
        self._mesh_cop_msg_type_map = create_default_mesh_cop_msg_type_map()
def _get_length(self, data):
return ord(data.read(1))
def _get_tlv_factory(self, _type):
try:
return self._tlvs_factories[_type]
except KeyError:
logging.error('Could not find TLV factory. Unsupported TLV type: {}'.format(_type))
return UnknownTlvFactory(_type)
def _parse_tlv(self, data):
_type = TlvType(ord(data.read(1)))
length = self._get_length(data)
value = data.read(length)
factory = self._get_tlv_factory(_type)
return factory.parse(io.BytesIO(value), None) # message_info not needed here
def _get_mesh_cop_msg_type(self, msg_type_str):
try:
return self._mesh_cop_msg_type_map[msg_type_str]
except KeyError:
raise KeyError('Mesh cop message type not found: {}'.format(msg_type_str))
def parse(self, cmd_type_str, data):
cmd_type = self._get_mesh_cop_msg_type(cmd_type_str)
tlvs = []
while data.tell() < len(data.getvalue()):
tlv = self._parse_tlv(data)
tlvs.append(tlv)
return MeshCopCommand(cmd_type, tlvs)
def create_default_mesh_cop_tlv_factories():
return {
TlvType.STATE: StateFactory(),
TlvType.PROVISIONING_URL: ProvisioningUrlFactory(),
TlvType.VENDOR_NAME: VendorNameFactory(),
TlvType.VENDOR_MODEL: VendorModelFactory(),
TlvType.VENDOR_SW_VERSION: VendorSWVersionFactory(),
TlvType.VENDOR_DATA: VendorDataFactory(),
TlvType.VENDOR_STACK_VERSION: VendorStackVersionFactory(),
}
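def _example_parse_mesh_cop_command(raw_tlv_bytes):
    # Illustrative sketch only (not part of the original harness): decode the
    # TLVs of a JOIN_FIN.req payload with the default factories. The argument
    # `raw_tlv_bytes` is assumed to be the raw TLV portion of a MeshCoP command.
    import io
    factory = MeshCopCommandFactory(create_default_mesh_cop_tlv_factories())
    command = factory.parse('JOIN_FIN.req', io.BytesIO(raw_tlv_bytes))
    for tlv in command.tlvs:
        print(tlv)
    return command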
class ThreadDiscoveryTlvsFactory(SubTlvsFactory):
def __init__(self, sub_tlvs_factories):
super(ThreadDiscoveryTlvsFactory, self).__init__(sub_tlvs_factories)
|
|
import struct
import numpy
import pickle
debug_level = 0
def unpack_stream(stream):
if debug_level > 0:
print('> packet: waiting for next character')
buffer = stream.read(1)
if not buffer:
raise NameError('read failed')
(btype,) = struct.unpack('c', buffer)
if debug_level > 0:
print("> packet: got '{}'".format(btype))
if btype == b'A' or btype == b'C':
# Read next character
buffer = stream.read(1)
(command,) = struct.unpack('c', buffer)
return (str(btype, 'utf-8'), str(command, 'utf-8'))
elif btype == b'S':
# Read length (int)
buffer = stream.read(4)
(blen,) = struct.unpack('<I', buffer)
# Read string (blen*char)
buffer = stream.read(blen)
(bmessage,) = struct.unpack('%ds' % (blen,), buffer)
return ('S', str(bmessage, 'utf-8'))
elif btype == b'I':
# Read int
buffer = stream.read(4)
(i,) = struct.unpack('<i', buffer)
return ('I', i)
elif btype == b'F':
# Read float
buffer = stream.read(4)
(f,) = struct.unpack('<f', buffer)
return ('F', f)
elif btype == b'D':
# Read double
buffer = stream.read(8)
(d,) = struct.unpack('<d', buffer)
return ('D', d)
elif btype == b'V':
# Read type (char)
buffer = stream.read(1)
(vtype,) = struct.unpack('c', buffer)
# Read length (int)
buffer = stream.read(4)
(vlen,) = struct.unpack('<I', buffer)
# Read data (blen*)
if debug_level > 0:
print("> packet::vector: '{}[{}]'".format(vtype, vlen))
if vtype == b'I':
vector = numpy.zeros(vlen, int)
buffer = stream.read(4 * vlen)
for k in range(vlen):
(vector[k],) = struct.unpack('<i', buffer[k*4:(k+1)*4])
elif vtype == b'F':
vector = numpy.zeros(vlen, float)
buffer = stream.read(4 * vlen)
for k in range(vlen):
(vector[k],) = struct.unpack('<f', buffer[k*4:(k+1)*4])
elif vtype == b'D':
vector = numpy.zeros(vlen, float)
buffer = stream.read(8 * vlen)
for k in range(vlen):
(vector[k],) = struct.unpack('<d', buffer[k*8:(k+1)*8])
else:
# error
pass
# return vector
return ('V', vector)
elif btype == b'M':
# Read row size (int)
buffer = stream.read(4)
(rsize,) = struct.unpack('<I', buffer)
# read vector
(vtype, vector) = unpack_stream(stream)
# resize vector as matrix
vector = numpy.resize(vector, (rsize, int(vector.size/rsize)))
# return vector
return ('M', vector)
elif btype == b'P' or btype == b'E' or btype == b'K' or btype == b'R':
# Read object size (int)
buffer = stream.read(4)
(bsize,) = struct.unpack('<I', buffer)
# read object
buffer = stream.read(bsize)
# unpickle
object = pickle.loads(buffer)
# return object
if btype == b'P':
return ('P', object)
elif btype == b'E':
return ('E', object)
elif btype == b'K':
return ('K', object)
else: # btype == b'R':
return ('R', object)
else:
raise NameError('Unknown type')
def pack_vector(type, content):
buffer = b''
type = '<' + type
if len(content.shape) > 1:
# matrix
for k in range(content.shape[0]):
for value in content[k,:]:
buffer += struct.pack(type, value)
else:
# vector
for value in content:
buffer += struct.pack(type, value)
return buffer
def pack(type, content):
# command
if type == 'A':
bmessage = bytes(content[0], 'utf-8')
return struct.pack('cc', b'A', bmessage)
elif type == 'C':
bmessage = bytes(content[0], 'utf-8')
return struct.pack('cc', b'C', bmessage)
# message
elif type == 'S':
bmessage = bytes(content, 'utf-8')
blen = len(bmessage)
return struct.pack('<cI%ds' % (blen,),
b'S',
blen,
bmessage)
# integer
elif type == 'I':
return struct.pack('<ci', b'I', content)
# float
elif type == 'F':
return struct.pack('<cf', b'F', content)
# double
elif type == 'D':
return struct.pack('<cd', b'D', content)
#vector
elif type == 'V':
vlen = content.size
if numpy.issubsctype(content, numpy.int):
return ( struct.pack('<ccI', b'V', b'I', vlen) +
pack_vector('i', content) )
elif numpy.issubsctype(content, numpy.float32):
return ( struct.pack('<ccI', b'V', b'F', vlen) +
pack_vector('f', content) )
elif numpy.issubsctype(content, numpy.float):
return ( struct.pack('<ccI', b'V', b'D', vlen) +
pack_vector('d', content) )
else:
# error
pass
#matrix
elif type == 'M':
rsize = content.shape[0]
return ( struct.pack('<cI', b'M', rsize) +
pack('V', content) )
# pickle
elif type == 'P':
#print('content = {}'.format(content))
try:
bmessage = pickle.dumps(content)
except pickle.PicklingError:
# try wrapping in list
bmessage = pickle.dumps(list(content))
except:
print('*** PACKET FAILED TO PICKLE ***')
print('content = {}'.format(content))
            bmessage = pickle.dumps(None)
return struct.pack('<cI', b'P', len(bmessage)) + bmessage
# pickle (Exception)
elif type == 'E':
try:
bmessage = pickle.dumps(content)
except:
print('*** PACKET FAILED TO PICKLE ***')
            print('content = {}'.format(content))
            bmessage = pickle.dumps(None)
        return struct.pack('<cI', b'E', len(bmessage)) + bmessage
# pickle (kwargs)
elif type == 'K':
try:
bmessage = pickle.dumps(content)
except:
print('*** PACKET FAILED TO PICKLE ***')
print('content = {}'.format(content))
bmessage = pickle.dumps({})
return struct.pack('<cI', b'K', len(bmessage)) + bmessage
# pickle (vargs)
elif type == 'R':
bmessage = pickle.dumps(content)
return struct.pack('<cI', b'R', len(bmessage)) + bmessage
else:
raise NameError('Unknown type')
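def _example_round_trip():
    # Minimal sketch (not part of the original protocol code): pack a few
    # values into an in-memory stream and read them back with unpack_stream().
    import io
    stream = io.BytesIO(pack('I', 42) + pack('F', 1.5) + pack('S', 'hello'))
    print(unpack_stream(stream))  # ('I', 42)
    print(unpack_stream(stream))  # ('F', 1.5)
    print(unpack_stream(stream))  # ('S', 'hello')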
|
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
from peewee import *
from playhouse.pool import PooledMySQLDatabase
log = logging.getLogger(__name__)
# mysql connection pool
db = PooledMySQLDatabase(
database='oms',
host='127.0.0.1',
port=3306,
user='root',
passwd='123456',
charset='utf8',
max_connections=20,
stale_timeout=300
)
# base model
class BaseModel(Model):
class Meta:
database = db
# task table
class Task(BaseModel):
id = IntegerField()
task_id = CharField(unique=True)
creator = CharField()
ip = CharField()
create_time = IntegerField()
target = CharField()
version = IntegerField()
type = CharField()
content = CharField()
description = CharField()
executor = CharField()
status = BooleanField()
start_time = IntegerField()
revert_time = IntegerField()
percent = IntegerField()
revert = BooleanField()
class Meta:
db_table = 'task'
# machine table
class Machine(BaseModel):
id = IntegerField()
machine_name = CharField(unique=True)
inside_ip = CharField()
outside_ip = CharField()
usage = CharField()
is_initialized = IntegerField()
location = CharField()
remarks = CharField()
nginx = IntegerField()
mysql = IntegerField()
php = IntegerField()
redis = IntegerField()
memcache = IntegerField()
jdk = IntegerField()
tomcat = IntegerField()
class Meta:
db_table = 'machine'
# machine table
class SshKeyInfo(BaseModel):
id = IntegerField()
username = CharField()
ip = CharField()
system_user = CharField()
class Meta:
db_table = 'ssh_key_info'
# user table
class User(BaseModel):
id = IntegerField()
mail = CharField(unique=True)
username = CharField(unique=True)
nickname = CharField(unique=True)
passwd = CharField()
salt = CharField()
department = CharField()
permissions = CharField()
class Meta:
db_table = 'user'
# permissions table
class Permissions(BaseModel):
id = IntegerField()
permission = CharField(unique=True)
permission_desc = CharField()
permission_code = CharField(unique=True)
class Meta:
db_table = 'permissions'
def get_user_task_num_by_time(begin_time=0, end_time=0):
user_list = []
for a in User.select(User.username):
user_list.append(a.__dict__['_data']['username'])
user_task_statistic = {}
query = (Task
.select(Task.creator, fn.COUNT(Task.task_id).alias('task_sum'),
fn.FROM_UNIXTIME(Task.create_time, '%Y%m%d').alias('create_date'))
.where(
(Task.create_time >= begin_time) &
(Task.create_time <= end_time))
.group_by(Task.creator, SQL('create_date')))
try:
for info in query.execute():
creator = info.__dict__['_data']['creator']
task_sum = info.__dict__['task_sum']
create_date = info.__dict__['create_date']
if create_date not in user_task_statistic:
user_task_statistic[create_date] = {creator: task_sum}
else:
user_task_statistic[create_date][creator] = task_sum
print user_task_statistic, user_list
except Exception:
return False
else:
return user_task_statistic, user_list
def get_permission(is_all=False, start=0, count=10):
data_list = []
if is_all:
try:
for info in Permissions.select():
_data = info.__dict__['_data']
permission_code = _data['permission_code']
if len(permission_code) == 1:
pid = '0'
else:
pid = '.'.join(permission_code.split('.')[:-1])
name = _data['permission_desc'] + '[' + _data['permission'] + ']' + '[' + _data['permission_code'] + ']'
                permission_dict = {
                    'id': permission_code,
                    'pId': pid,
                    'name': name
                }
                data_list.append(permission_dict)
except Exception:
return False
else:
return data_list
else:
try:
for info in Permissions.select().offset(start).limit(count):
data_list.append(info.__dict__['_data'])
except Exception:
return False
else:
return data_list
def add(ssh_key_dict):
try:
SshKeyInfo.select().where((SshKeyInfo.ip == ssh_key_dict['ip'])
& (SshKeyInfo.username == ssh_key_dict['username'])
& (SshKeyInfo.system_user == ssh_key_dict['system_user'])
).get()
except Exception:
ssh_key_info = SshKeyInfo()
for key in ssh_key_dict:
setattr(ssh_key_info, key, ssh_key_dict[key])
try:
ssh_key_info.save()
except Exception:
log.exception('exception')
return False
else:
return True
else:
        return True
def test(ssh_key_dict):
try:
SshKeyInfo.select().where((SshKeyInfo.ip == ssh_key_dict['ip'])
& (SshKeyInfo.username == ssh_key_dict['username'])
& (SshKeyInfo.system_user == ssh_key_dict['system_user'])
).get()
except Exception as e:
print "bbbb"
print e
else:
print "aaa"
if __name__ == '__main__':
    test({'ip': '192.168.1.111', 'username': 'guoxu', 'system_user': 'admin'})
def get(mode=None, username=None, ip=None):
data_list = []
if mode == 'ip':
print ip
try:
for info in SshKeyInfo.select().where(SshKeyInfo.ip == ip):
data = info.__dict__['_data']
data_list.append({data['username']: data['system_user']})
except Exception as e:
print e
return False
else:
return data_list
elif mode == 'user':
try:
for info in SshKeyInfo.select().where(SshKeyInfo.username == username):
data = info.__dict__['_data']
data_list.append({data['ip']: data['system_user']})
except Exception as e:
print e
return False
else:
return data_list
else:
return False
def delete(username, ip, system_user):
del_data = (SshKeyInfo
.delete()
.where(
(SshKeyInfo.username == username) &
(SshKeyInfo.ip == ip) &
(SshKeyInfo.system_user == system_user)))
try:
del_data.execute()
except Exception:
log.exception('exception')
return False
else:
return True
def get_user_list():
data_list = []
try:
for info in User.select().order_by(User.id):
data_list.append(info.__dict__['_data']['username'])
except Exception as e:
print e
return False
else:
return data_list
def get_machine_list():
data_list = []
try:
for info in Machine.select().order_by(Machine.id):
data_list.append(info.__dict__['_data']['inside_ip'])
except Exception as e:
print e
return False
else:
return data_list
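def _example_usage():
    # Illustrative sketch only: how these query helpers are expected to be
    # called. The IP address below is a placeholder, not a host from this project.
    db.connect()
    try:
        print get_user_list()
        print get_machine_list()
        print get(mode='ip', ip='10.0.0.1')
    finally:
        db.close()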
|
|
# coding: utf-8
from sqlalchemy import BINARY, Column, ForeignKey, Index, Integer, String, Unicode, VARBINARY
from sqlalchemy.orm import relationship, backref
from dbdatetime import dbdatetime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class AlmanacBinding(Base):
__tablename__ = 'almanac_binding'
__table_args__ = (
Index('key_service', 'servicePHID', 'interfacePHID', unique=True),
)
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
servicePHID = Column(String, nullable=False)
devicePHID = Column(String, nullable=False, index=True)
interfacePHID = Column(String, nullable=False, index=True)
mailKey = Column(BINARY(20), nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class AlmanacBindingTransaction(Base):
__tablename__ = 'almanac_bindingtransaction'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
authorPHID = Column(String, nullable=False)
objectPHID = Column(String, nullable=False, index=True)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
commentPHID = Column(String)
commentVersion = Column(Integer, nullable=False)
transactionType = Column(Unicode(32), nullable=False)
oldValue = Column(Unicode, nullable=False)
newValue = Column(Unicode, nullable=False)
contentSource = Column(Unicode, nullable=False)
usermetadata = Column('metadata', Unicode, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class AlmanacDevice(Base):
__tablename__ = 'almanac_device'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
name = Column(Unicode(128), nullable=False, index=True)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
nameIndex = Column(BINARY(12), nullable=False, unique=True)
mailKey = Column(BINARY(20), nullable=False)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
isLocked = Column(Integer, nullable=False)
class AlmanacDeviceTransaction(Base):
__tablename__ = 'almanac_devicetransaction'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
authorPHID = Column(String, nullable=False)
objectPHID = Column(String, nullable=False, index=True)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
commentPHID = Column(String)
commentVersion = Column(Integer, nullable=False)
transactionType = Column(Unicode(32), nullable=False)
oldValue = Column(Unicode, nullable=False)
newValue = Column(Unicode, nullable=False)
contentSource = Column(Unicode, nullable=False)
usermetadata = Column('metadata', Unicode, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class AlmanacInterface(Base):
__tablename__ = 'almanac_interface'
__table_args__ = (
Index('key_location', 'networkPHID', 'address', 'port'),
)
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
devicePHID = Column(String, nullable=False, index=True)
networkPHID = Column(String, nullable=False)
address = Column(Unicode(64), nullable=False)
port = Column(Integer, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class AlmanacNetwork(Base):
__tablename__ = 'almanac_network'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
name = Column(Unicode(128), nullable=False)
mailKey = Column(BINARY(20), nullable=False)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class AlmanacNetworkTransaction(Base):
__tablename__ = 'almanac_networktransaction'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
authorPHID = Column(String, nullable=False)
objectPHID = Column(String, nullable=False, index=True)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
commentPHID = Column(String)
commentVersion = Column(Integer, nullable=False)
transactionType = Column(Unicode(32), nullable=False)
oldValue = Column(Unicode, nullable=False)
newValue = Column(Unicode, nullable=False)
contentSource = Column(Unicode, nullable=False)
usermetadata = Column('metadata', Unicode, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class AlmanacProperty(Base):
__tablename__ = 'almanac_property'
__table_args__ = (
Index('objectPHID', 'objectPHID', 'fieldIndex', unique=True),
)
id = Column(Integer, primary_key=True)
objectPHID = Column(String, nullable=False)
fieldIndex = Column(BINARY(12), nullable=False)
fieldName = Column(Unicode(128), nullable=False)
fieldValue = Column(Unicode, nullable=False)
class AlmanacService(Base):
__tablename__ = 'almanac_service'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
name = Column(Unicode(128), nullable=False, index=True)
nameIndex = Column(BINARY(12), nullable=False, unique=True)
mailKey = Column(BINARY(20), nullable=False)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
serviceClass = Column(Unicode(64), nullable=False, index=True)
isLocked = Column(Integer, nullable=False)
class AlmanacServiceTransaction(Base):
__tablename__ = 'almanac_servicetransaction'
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
authorPHID = Column(String, nullable=False)
objectPHID = Column(String, nullable=False, index=True)
viewPolicy = Column(String, nullable=False)
editPolicy = Column(String, nullable=False)
commentPHID = Column(String)
commentVersion = Column(Integer, nullable=False)
transactionType = Column(Unicode(32), nullable=False)
oldValue = Column(Unicode, nullable=False)
newValue = Column(Unicode, nullable=False)
contentSource = Column(Unicode, nullable=False)
usermetadata = Column('metadata', Unicode, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class Edge(Base):
__tablename__ = 'edge'
__table_args__ = (
Index('key_dst', 'dst', 'type', 'src', unique=True),
Index('src', 'src', 'type', 'dateCreated', 'seq')
)
src = Column(String, primary_key=True, nullable=False)
type = Column(Integer, primary_key=True, nullable=False)
dst = Column(String, primary_key=True, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
seq = Column(Integer, nullable=False)
dataID = Column(Integer)
class EdgeData(Base):
__tablename__ = 'edgedata'
id = Column(Integer, primary_key=True)
data = Column(Unicode, nullable=False)
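def _example_list_devices(engine_url):
    # Illustrative sketch only: bind these declarative models to an engine and
    # list Almanac device names. `engine_url` is a placeholder connection string.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine(engine_url)
    session = sessionmaker(bind=engine)()
    for device in session.query(AlmanacDevice).order_by(AlmanacDevice.name):
        print(device.name)
    session.close()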
|
|
#! /usr/bin/env python3
"""
This script performs event averaging for particle
spectra and anisotropic flow coefficients calculated
from event-by-event simulations
v_n is analyzed up to n = 6
Format for particle_XXX_vndata.dat file:
n_order real_part real_part_err imag_part imag_part_err
Format for particle_XXX_vndata_diff.dat file:
pT(GeV) pT_err(GeV) dN/(2pi dy pT dpT)(GeV^-2) dN/(2pi dy pT dpT)_err(GeV^-2)
vn_real vn_real_err vn_imag vn_imag_err
All the errors are only statistic errors
"""
from sys import argv, exit
from os import path, mkdir
from glob import glob
import h5py
from numpy import *
import shutil
# define colors
purple = "\033[95m"
green = "\033[92m"
blue = "\033[94m"
yellow = "\033[93m"
red = "\033[91m"
normal = "\033[0m"
try:
data_path = path.abspath(argv[1])
data_name = data_path.split("/")[-1]
results_folder_name = data_name.split(".h5")[0]
avg_folder = path.join(path.abspath(argv[2]),
results_folder_name)
print("output folder: %s" % avg_folder)
if(path.isdir(avg_folder)):
print("folder %s already exists!" % avg_folder)
var = input("do you want to delete it? [y/N]")
if 'y' in var:
shutil.rmtree(avg_folder)
else:
print("please choose another folder path~")
exit(0)
mkdir(avg_folder)
except IndexError:
print("Usage: %s working_folder results_folder" % argv[0])
exit(1)
rap_region = "-0.5_0.5"
n_order = 7
hf = h5py.File(data_path, "r")
event_list = list(hf.keys())
nev = len(event_list)
print("processing two particle correlations ...")
file_name_ch = 'particle_9999_vn2_eta_%s.dat' % rap_region
file_name_ss = 'particle_9999_Cn2_ss_eta_%s.dat' % rap_region
file_name_os = 'particle_9999_Cn2_os_eta_%s.dat' % rap_region
Qn2_array_ch = []
Qn2_array_ss = []
Qn2_array_os = []
for ifolder, event_name in enumerate(event_list):
event_group = hf.get(event_name)
temp_data_vn2 = event_group.get(file_name_ch)
Qn2_array_ch.append(temp_data_vn2)
temp_data_vn2 = event_group.get(file_name_ss)
Qn2_array_ss.append(temp_data_vn2)
temp_data_vn2 = event_group.get(file_name_os)
Qn2_array_os.append(temp_data_vn2)
Qn2_array_ch = array(Qn2_array_ch)
Qn2_array_ss = array(Qn2_array_ss)
Qn2_array_os = array(Qn2_array_os)
output_filename = ("two_particle_correlation_STAR.dat")
output_filename_ss = ("two_particle_correlation_ss_STAR.dat")
output_filename_os = ("two_particle_correlation_os_STAR.dat")
f = open(output_filename, 'w')
f_ss = open(output_filename_ss, 'w')
f_os = open(output_filename_os, 'w')
f.write("# n vn{2}^2 vn{2}^2_err\n")
f_ss.write("# n C_n{2} C_n{2}_err\n")
f_os.write("# n C_n{2} C_n{2}_err\n")
Npair = mean(Qn2_array_ch[:, 0, 3])
Npair_err = sqrt(mean(Qn2_array_ch[:, 0, 4]) - Npair**2.)/sqrt(nev)
f.write("%s %.5e %.5e\n" % (0, Npair, Npair_err))
Npair_ss = mean(Qn2_array_ss[:, 0, 3])
Npair_ss_err = sqrt(mean(Qn2_array_ss[:, 0, 4]) - Npair_ss**2.)/sqrt(nev)
f_ss.write("%s %.5e %.5e\n" % (0, Npair_ss, Npair_ss_err))
Npair_os = mean(Qn2_array_os[:, 0, 3])
Npair_os_err = sqrt(mean(Qn2_array_os[:, 0, 4]) - Npair_os**2.)/sqrt(nev)
f_os.write("%s %.5e %.5e\n" % (0, Npair_os, Npair_os_err))
for ii in range(1, 9):
vn2_ch = mean(Qn2_array_ch[:, ii, 3])
vn2_ch_err = sqrt(mean(Qn2_array_ch[:, ii, 4]) - vn2_ch**2.)/sqrt(nev)
vn2_ch = vn2_ch/Npair
vn2_ch_err = vn2_ch_err/Npair
f.write("%s %.5e %.5e\n" % (ii, vn2_ch, vn2_ch_err))
vn2_ss = mean(Qn2_array_ss[:, ii, 3])
vn2_ss_err = sqrt(mean(Qn2_array_ss[:, ii, 4]) - vn2_ss**2.)/sqrt(nev)
vn2_ss = vn2_ss/Npair_ss
vn2_ss_err = vn2_ss_err/Npair_ss
f_ss.write("%s %.5e %.5e\n" % (ii, vn2_ss, vn2_ss_err))
vn2_os = mean(Qn2_array_os[:, ii, 3])
vn2_os_err = sqrt(mean(Qn2_array_os[:, ii, 4]) - vn2_os**2.)/sqrt(nev)
vn2_os = vn2_os/Npair_os
vn2_os_err = vn2_os_err/Npair_os
f_os.write("%s %.5e %.5e\n" % (ii, vn2_os, vn2_os_err))
f.close()
f_ss.close()
f_os.close()
shutil.move(output_filename, avg_folder)
shutil.move(output_filename_ss, avg_folder)
shutil.move(output_filename_os, avg_folder)
print("processing two particle correlations delta eta dependence ...")
file_name_ch = 'particle_9999_vn2_eta12_pT_0.2_3.dat'
file_name_ss = 'particle_9999_vn2_eta12_ss_pT_0.2_3.dat'
file_name_os = 'particle_9999_vn2_eta12_os_pT_0.2_3.dat'
Qn2_array_ch = []
Qn2_array_ss = []
Qn2_array_os = []
for ifolder, event_name in enumerate(event_list):
event_group = hf.get(event_name)
temp_data_vn2 = event_group.get(file_name_ch)
Qn2_array_ch.append(temp_data_vn2)
temp_data_vn2 = event_group.get(file_name_ss)
Qn2_array_ss.append(temp_data_vn2)
temp_data_vn2 = event_group.get(file_name_os)
Qn2_array_os.append(temp_data_vn2)
Qn2_array_ch = array(Qn2_array_ch)
Qn2_array_ss = array(Qn2_array_ss)
Qn2_array_os = array(Qn2_array_os)
output_filename = ("two_particle_correlation_delta_eta12_STAR.dat")
output_filename_ss = ("two_particle_correlation_delta_eta12_ss_STAR.dat")
output_filename_os = ("two_particle_correlation_delta_eta12_os_STAR.dat")
f = open(output_filename, 'w')
f.write("# rap vn{2}^2 vn{2}^2_err\n")
f_ss = open(output_filename_ss, 'w')
f_ss.write("# rap vn{2}^2 vn{2}^2_err\n")
f_os = open(output_filename_os, 'w')
f_os.write("# rap vn{2}^2 vn{2}^2_err\n")
Npair = mean(Qn2_array_ch[:, :, 3], axis=0)
#Npair_err = sqrt(mean(Qn2_array[:, :, 4], axis=0) - Npair**2.)/sqrt(nev)
Npair_err = std(Qn2_array_ch[:, :, 3], axis=0)/sqrt(nev)
output = []
output.append(Qn2_array_ch[0, :, 0])
output.append(Npair)
output.append(Npair_err)
Npair_ss = mean(Qn2_array_ss[:, :, 3], axis=0)
Npair_ss_err = std(Qn2_array_ss[:, :, 3], axis=0)/sqrt(nev)
output_ss = []
output_ss.append(Qn2_array_ss[0, :, 0])
output_ss.append(Npair_ss)
output_ss.append(Npair_ss_err)
Npair_os = mean(Qn2_array_os[:, :, 3], axis=0)
Npair_os_err = std(Qn2_array_os[:, :, 3], axis=0)/sqrt(nev)
output_os = []
output_os.append(Qn2_array_os[0, :, 0])
output_os.append(Npair_os)
output_os.append(Npair_os_err)
for ii in range(1, 9):
#vn2_ch = mean(Qn2_array[:, :, 4*ii+3], axis=0)
#vn2_ch_err = sqrt(mean(Qn2_array[:, :, 4*ii+4], axis=0)
# - vn2_ch**2.)/sqrt(nev)
#vn2_ch = vn2_ch/Npair
#vn2_ch_err = vn2_ch_err/Npair
vn2_ch = mean(Qn2_array_ch[:, :, 4*ii+1]*Qn2_array_ch[:, :, 3], axis=0)/Npair
vn2_ch_err = sqrt(mean(Qn2_array_ch[:, :, 4*ii+2]**2., axis=0))/sqrt(nev)
output.append(vn2_ch)
output.append(vn2_ch_err)
vn2_ss = mean(Qn2_array_ss[:, :, 4*ii+1]*Qn2_array_ss[:, :, 3], axis=0)/Npair_ss
vn2_ss_err = sqrt(mean(Qn2_array_ss[:, :, 4*ii+2]**2., axis=0))/sqrt(nev)
output_ss.append(vn2_ss)
output_ss.append(vn2_ss_err)
vn2_os = mean(Qn2_array_os[:, :, 4*ii+1]*Qn2_array_os[:, :, 3], axis=0)/Npair_os
vn2_os_err = sqrt(mean(Qn2_array_os[:, :, 4*ii+2]**2., axis=0))/sqrt(nev)
output_os.append(vn2_os)
output_os.append(vn2_os_err)
output = array(output)
output = output.transpose()
output_ss = array(output_ss)
output_ss = output_ss.transpose()
output_os = array(output_os)
output_os = output_os.transpose()
for irap in range(len(Npair)):
f.write("%.5e "*19 % tuple(output[irap, :]))
f.write("\n")
f_ss.write("%.5e "*19 % tuple(output_ss[irap, :]))
f_ss.write("\n")
f_os.write("%.5e "*19 % tuple(output_os[irap, :]))
f_os.write("\n")
f.close()
f_ss.close()
f_os.close()
shutil.move(output_filename, avg_folder)
shutil.move(output_filename_ss, avg_folder)
shutil.move(output_filename_os, avg_folder)
print("processing three particle correlations ...")
# load the file
file_name_ss = 'particle_9999_Cmnk_ss_eta_%s.dat' % rap_region
file_name_os = 'particle_9999_Cmnk_os_eta_%s.dat' % rap_region
file_name_ch = 'particle_9999_Cmnk_eta_%s.dat' % rap_region
file_name_vn2 = 'particle_9999_vn2_eta_%s.dat' % rap_region
file_name_spvn = 'particle_9999_vndata_diff_eta_%s.dat' % rap_region
C_mnk_ch_array = []
C_mnk_ss_array = []
C_mnk_os_array = []
Qn2_array = []
dN_array = []; pT_array = []
for ifolder, event_name in enumerate(event_list):
event_group = hf.get(event_name)
temp_data_ch = event_group.get(file_name_ch)
temp_data_ss = event_group.get(file_name_ss)
temp_data_os = event_group.get(file_name_os)
temp_data_vn2 = event_group.get(file_name_vn2)
temp_data_spvn = event_group.get(file_name_spvn)
C_mnk_ch_array.append(temp_data_ch)
C_mnk_ss_array.append(temp_data_ss)
C_mnk_os_array.append(temp_data_os)
Qn2_array.append(temp_data_vn2[:, 3:5])
dN_array.append(temp_data_spvn[:, 2])
pT_array.append(temp_data_spvn[:, 0])
C_mnk_ch_array = array(C_mnk_ch_array)
C_mnk_ss_array = array(C_mnk_ss_array)
C_mnk_os_array = array(C_mnk_os_array)
Qn2_array = array(Qn2_array)
dN_array = array(dN_array)
pT_array = array(pT_array)
n_pT = len(pT_array[0, :])
pT_spectra = zeros([n_pT])
for ipT in range(len(pT_array[0, :])):
dN_temp = sum(dN_array[:, ipT]*pT_array[:, ipT])
if (dN_temp > 0):
pT_spectra[ipT] = (
sum(pT_array[:, ipT]**2.*dN_array[:, ipT])/dN_temp)
else:
pT_spectra[ipT] = mean(pT_array[:, ipT])
dN_spectra = mean(pT_array*dN_array, 0)/pT_spectra # dN/(2pi dy pT dpT)
pT_interp = linspace(0.05, 3.5, 40)
dN_interp = exp(interp(pT_interp, pT_spectra, log(dN_spectra+1e-30)))
mean_pT_sq = sum(pT_interp**3.*dN_interp)/sum(pT_interp*dN_interp)
pT_interp = linspace(0.2, 3.5, 40)
dN_interp = exp(interp(pT_interp, pT_spectra, log(dN_spectra+1e-30)))
mean_pT = sum(pT_interp**2.*dN_interp)/sum(pT_interp*dN_interp)
mean_pT_sq_2 = sum(pT_interp**3.*dN_interp)/sum(pT_interp*dN_interp)
factor = mean_pT**2./mean_pT_sq
factor2 = mean_pT_sq_2/mean_pT_sq
print(factor, factor2)
nev = len(C_mnk_ch_array[:, 0, 1])
corr_label = ['000', '112', '123', '224', '235', '134', '246', '336', '347']
output_filename = ("three_plane_correlation_STAR.dat")
f = open(output_filename, 'w')
f.write("#C_mnk_ss C_mnk_ss_err C_mnk_os C_mnk_os_err C_mnk_ch C_mnk_ch_err\n")
for ii in range(1, len(corr_label)):
momentum_conservation = 0.0
#if ii == 1:
# momentum_conservation = - factor*2.*Qn2_array[:, 2, 0]
C_mnk_ch_avg = (
sum(C_mnk_ch_array[:, 0, 1]*C_mnk_ch_array[:, ii, 1]
+ momentum_conservation)/(sum(C_mnk_ch_array[:, 0, 1])))
C_mnk_ch_err = (
sum(C_mnk_ch_array[:, 0, 1]*C_mnk_ch_array[:, ii, 2])
/(sum(C_mnk_ch_array[:, 0, 1]))/sqrt(nev))
C_mnk_os_avg = (sum(C_mnk_os_array[:, 0, 1]*C_mnk_os_array[:, ii, 1])
/(sum(C_mnk_os_array[:, 0, 1])))
C_mnk_os_err = (sum(C_mnk_os_array[:, 0, 1]*C_mnk_os_array[:, ii, 2])
/(sum(C_mnk_os_array[:, 0, 1]))/sqrt(nev))
C_mnk_ss_avg = (sum(C_mnk_ss_array[:, 0, 1]*C_mnk_ss_array[:, ii, 1])
/(sum(C_mnk_ss_array[:, 0, 1])))
C_mnk_ss_err = (sum(C_mnk_ss_array[:, 0, 1]*C_mnk_ss_array[:, ii, 2])
/(sum(C_mnk_ss_array[:, 0, 1]))/sqrt(nev))
C_mnk_os13_avg = (sum(C_mnk_os_array[:, 0, 1]*C_mnk_os_array[:, ii, 3])
/(sum(C_mnk_os_array[:, 0, 1])))
C_mnk_os13_err = (sum(C_mnk_os_array[:, 0, 1]*C_mnk_os_array[:, ii, 4])
/(sum(C_mnk_os_array[:, 0, 1]))/sqrt(nev))
C_mnk_ss13_avg = (sum(C_mnk_ss_array[:, 0, 1]*C_mnk_ss_array[:, ii, 3])
/(sum(C_mnk_ss_array[:, 0, 1])))
C_mnk_ss13_err = (sum(C_mnk_ss_array[:, 0, 1]*C_mnk_ss_array[:, ii, 4])
/(sum(C_mnk_ss_array[:, 0, 1]))/sqrt(nev))
f.write("%s %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e\n"
% (corr_label[ii], C_mnk_ss_avg, C_mnk_ss_err,
C_mnk_os_avg, C_mnk_os_err, C_mnk_ch_avg, C_mnk_ch_err,
C_mnk_ss13_avg, C_mnk_ss13_err, C_mnk_os13_avg, C_mnk_os13_err))
f.close()
shutil.move(output_filename, avg_folder)
print("processing three particle correlations as a function of delta eta ...")
eta_type_list = ['eta12', 'eta13']
for itype in range(len(eta_type_list)):
eta_type = eta_type_list[itype]
# load the file
file_name_ss = 'particle_9999_Cmnk_%s_ss_pT_0.2_3.dat' % eta_type
file_name_os = 'particle_9999_Cmnk_%s_os_pT_0.2_3.dat' % eta_type
file_name_ch = 'particle_9999_Cmnk_%s_pT_0.2_3.dat' % eta_type
C_mnk_ch_array = []
C_mnk_ss_array = []
C_mnk_os_array = []
for ifolder, event_name in enumerate(event_list):
event_group = hf.get(event_name)
temp_data_ch = event_group.get(file_name_ch)
temp_data_ss = event_group.get(file_name_ss)
temp_data_os = event_group.get(file_name_os)
C_mnk_ch_array.append(temp_data_ch)
C_mnk_ss_array.append(temp_data_ss)
C_mnk_os_array.append(temp_data_os)
C_mnk_ch_array = array(C_mnk_ch_array)
C_mnk_ss_array = array(C_mnk_ss_array)
C_mnk_os_array = array(C_mnk_os_array)
nev, nrap, n_corr = C_mnk_ch_array.shape
results_ch = zeros([nrap, n_corr])
results_ch[:, 0] = mean(C_mnk_ch_array[:, :, 0], axis=0)
results_ss = zeros([nrap, n_corr])
results_ss[:, 0] = mean(C_mnk_ss_array[:, :, 0], axis=0)
results_os = zeros([nrap, n_corr])
results_os[:, 0] = mean(C_mnk_os_array[:, :, 0], axis=0)
for ii in range(0, len(corr_label)):
if ii == 0:
results_ch[:, 2*ii+1] = mean(C_mnk_ch_array[:, :, 2*ii+1], axis=0)
results_ch[:, 2*ii+2] = (
sqrt(mean(C_mnk_ch_array[:, :, 2*ii+2]**2., axis=0))/sqrt(nev))
results_ss[:, 2*ii+1] = mean(C_mnk_ss_array[:, :, 2*ii+1], axis=0)
results_ss[:, 2*ii+2] = (
sqrt(mean(C_mnk_ss_array[:, :, 2*ii+2]**2., axis=0))/sqrt(nev))
results_os[:, 2*ii+1] = mean(C_mnk_os_array[:, :, 2*ii+1], axis=0)
results_os[:, 2*ii+2] = (
sqrt(mean(C_mnk_os_array[:, :, 2*ii+2]**2., axis=0))/sqrt(nev))
else:
results_ch[:, 2*ii+1] = (
mean(C_mnk_ch_array[:, :, 2*ii+1]*C_mnk_ch_array[:, :, 1], axis=0)
/mean(C_mnk_ch_array[:, :, 1], axis=0))
results_ch[:, 2*ii+2] = (
sqrt(mean(C_mnk_ch_array[:, :, 2*ii+2]**2., axis=0))/sqrt(nev))
results_ss[:, 2*ii+1] = (
mean(C_mnk_ss_array[:, :, 2*ii+1]*C_mnk_ss_array[:, :, 1], axis=0)
/mean(C_mnk_ss_array[:, :, 1], axis=0))
results_ss[:, 2*ii+2] = (
sqrt(mean(C_mnk_ss_array[:, :, 2*ii+2]**2., axis=0))/sqrt(nev))
results_os[:, 2*ii+1] = (
mean(C_mnk_os_array[:, :, 2*ii+1]*C_mnk_os_array[:, :, 1], axis=0)
/mean(C_mnk_os_array[:, :, 1], axis=0))
results_os[:, 2*ii+2] = (
sqrt(mean(C_mnk_os_array[:, :, 2*ii+2]**2., axis=0))/sqrt(nev))
ncol = 2*len(corr_label) + 1
output_filename = ("three_plane_correlation_ch_delta_%s_STAR.dat" % eta_type)
f = open(output_filename, 'w')
f.write("# %s C_nmk C_nmk_err (000, 112, 123, 224, 235, 134, 246, 336, 347)\n"
% eta_type)
    for irap in range(nrap):
f.write("%.5e "*ncol % tuple(results_ch[irap, :]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("three_plane_correlation_ss_delta_%s_STAR.dat" % eta_type)
f = open(output_filename, 'w')
f.write("# %s C_nmk C_nmk_err (000, 112, 123, 224, 235, 134, 246, 336, 347)\n"
% eta_type)
    for irap in range(nrap):
f.write("%.5e "*ncol % tuple(results_ss[irap, :]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("three_plane_correlation_os_delta_%s_STAR.dat" % eta_type)
f = open(output_filename, 'w')
f.write("# %s C_nmk C_nmk_err (000, 112, 123, 224, 235, 134, 246, 336, 347)\n"
% eta_type)
    for irap in range(nrap):
f.write("%.5e "*ncol % tuple(results_os[irap, :]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
print("processing four particle correlations ...")
file_name_ch = 'particle_9999_Cn4_eta_%s.dat' % rap_region
file_name_C2 = 'particle_9999_vn2_eta_%s.dat' % rap_region
Qn4_ch_array = []
Qn2_array_ch = []
for ifolder, event_name in enumerate(event_list):
event_group = hf.get(event_name)
temp_data_ch = event_group.get(file_name_ch)
Qn4_ch_array.append(temp_data_ch)
temp_data_vn2 = event_group.get(file_name_C2)
Qn2_array_ch.append(temp_data_vn2)
Qn4_ch_array = array(Qn4_ch_array)
Qn2_array_ch = array(Qn2_array_ch)
nev = len(Qn4_ch_array[:, 0, 1])
output_filename = ("charged_hadron_Cn4_STAR.dat")
f = open(output_filename, 'w')
f.write("# n Cn4_ch Cn4_ch_err\n")
for ii in range(1, len(Qn4_ch_array[0, :, 0])):
vn4_ch_avg = (sum(Qn4_ch_array[:, 0, 1]*Qn4_ch_array[:, ii, 1])
/(sum(Qn4_ch_array[:, 0, 1])))
vn4_ch_err = (sum(Qn4_ch_array[:, 0, 1]*Qn4_ch_array[:, ii, 2])
/(sum(Qn4_ch_array[:, 0, 1]))/sqrt(nev))
vn2_ch = mean(Qn2_array_ch[:, ii, 3])
vn2_ch_err = sqrt(mean(Qn2_array_ch[:, ii, 4]) - vn2_ch**2.)/sqrt(nev)
vn2_ch = vn2_ch/mean(Qn2_array_ch[:, 0, 3])
vn2_ch_err = vn2_ch_err/mean(Qn2_array_ch[:, 0, 3])
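    # standard cumulant relation used below: c_n{4} = <<4>> - 2<<2>>^2, and v_n{4} = (-c_n{4})^(1/4) when c_n{4} < 0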
Cn4_ch_avg = vn4_ch_avg - 2.*vn2_ch**2.
Cn4_ch_err = sqrt(vn4_ch_err**2. + (2.*2.*vn2_ch*vn2_ch_err)**2.)
vn4_avg = 0.0
vn4_err = 0.0
if Cn4_ch_avg < 0.:
vn4_avg = (-Cn4_ch_avg)**0.25
vn4_err = Cn4_ch_err/(4.*(-Cn4_ch_avg)**0.75)
f.write("%s %.5e %.5e %.5e %.5e\n"
% (ii, vn4_avg, vn4_err, Cn4_ch_avg, Cn4_ch_err))
f.close()
shutil.move(output_filename, avg_folder)
# load the file
file_name_ch = 'particle_9999_SCmn_eta_%s.dat' % rap_region
SC_mn_ch_array = []
for ifolder, event_name in enumerate(event_list):
event_group = hf.get(event_name)
temp_data_ch = event_group.get(file_name_ch)
SC_mn_ch_array.append(temp_data_ch)
SC_mn_ch_array = array(SC_mn_ch_array)
nev = len(SC_mn_ch_array[:, 0, 1])
corr_label = ['00', '32', '42', '52', '43', '53']
mn_idx = [(0, 0), (3, 2), (4, 2), (5, 2), (4, 3), (5, 3)]
output_filename = ("symmetric_cumulant_STAR.dat")
f = open(output_filename, 'w')
f.write("# name SC_mn_ch SC_mn_ch_err\n")
for ii in range(1, len(corr_label)):
vmvn_ch_avg = (sum(SC_mn_ch_array[:, 0, 1]*SC_mn_ch_array[:, ii, 1])
/(sum(SC_mn_ch_array[:, 0, 1])))
vmvn_ch_err = (sum(SC_mn_ch_array[:, 0, 1]*SC_mn_ch_array[:, ii, 2])
/(sum(SC_mn_ch_array[:, 0, 1]))/sqrt(nev))
vm2_ch = mean(Qn2_array_ch[:, mn_idx[ii][0], 3])
    vm2_ch_err = (sqrt(mean(Qn2_array_ch[:, mn_idx[ii][0], 4]) - vm2_ch**2.)
/sqrt(nev))
vm2_ch = vm2_ch/mean(Qn2_array_ch[:, 0, 3])
vm2_ch_err = vm2_ch_err/mean(Qn2_array_ch[:, 0, 3])
vn2_ch = mean(Qn2_array_ch[:, mn_idx[ii][1], 3])
vn2_ch_err = (sqrt(mean(Qn2_array_ch[:, mn_idx[ii][1], 4]) - vn2_ch**2.)
/sqrt(nev))
vn2_ch = vn2_ch/mean(Qn2_array_ch[:, 0, 3])
vn2_ch_err = vn2_ch_err/mean(Qn2_array_ch[:, 0, 3])
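    # symmetric cumulant used below: SC(m,n) = <v_m^2 v_n^2> - <v_m^2><v_n^2>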
SC_mn_ch_avg = vmvn_ch_avg - vm2_ch*vn2_ch
SC_mn_ch_err = sqrt(vmvn_ch_err**2. + (vm2_ch_err*vn2_ch)**2.
+ (vm2_ch*vn2_ch_err)**2.)
f.write("%s %.5e %.5e\n" % (corr_label[ii], SC_mn_ch_avg, SC_mn_ch_err))
f.close()
shutil.move(output_filename, avg_folder)
print("Analysis is done.")
|
|
#
# Copyright (c) 2016-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# This script takes an ELF file, converts it to its binary in-memory
# representation, signs it (using either an online service or a
# locally-stored key), and replaces the contents of the specified
# ELF section with the generated signature.
#
# Usage:
# elf_sign.py <elfPath> <signingKey> <sectionName> <elf2binCmd>
#
# elfPath - Path to the ELF we want to sign
# signingKey - Key to use (either a service URL or a local key path)
# sectionName - ELF section to replace with signature
# elf2binCmd - Command for converting ELF to in-memory binary representation
#
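# Example invocation (hypothetical paths and command, for illustration only):
#   elf_sign.py build/app.elf keys/signing_key.pem .signature \
#       "arm-none-eabi-objcopy -O binary -R .signature"
#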
import sys, os, subprocess, tempfile, collections, hashlib, binascii, pyasn1, pyasn1.codec.ber.decoder, pycurl, StringIO
SIGNATURE_NONE = b'\x00'
SIGNATURE_ECDSA_SHA256_SECP224R1 = b'\x01'
# Dump integer to a C array, used for debugging only
def c_array(name, val):
length = len(val)
res = ""
for i in range(0, length):
if (i % 16) == 0:
res = res + " "
res = res + "0x%02x, " % val[i]
if ((i % 16) == 15) | (i == (length - 1)):
res = res + "\n"
res = " uint8_t " + name + "[] = {\n" + res + " };"
print res
# Read a little endian value from an ELF file
def elf_read_le(buf, offset, n):
val = 0
for i in range(0, n):
val = (val << 8) | ord(buf[offset + n - 1 - i])
return val
# Replace the contents of an ELF section. Used to replace blank signature data with the actual signature.
# binutils objcopy has a new command '--update-section .sectionname=file', but is not yet available. This does the same thing.
def elf_update_section(elfPath, sectionName, sectionData):
sectionTuple = collections.namedtuple("sectionTuple", "name_offset, name, offset, size")
# Read in the original ELF
elfSize = os.stat(elfPath).st_size
elf = open(elfPath, "rb")
output = elf.read()
elf.close()
# Do some sanity checking on the ELF file headers
magic = output[0:4]
assert magic == b'\x7fELF', 'Magic number does not match'
ei_class = output[4]
assert ei_class == b'\x01', 'Only 32-bit ELF files are supported'
ei_data = output[5]
    assert ei_data == b'\x01', "Only LE ELF files are supported"
ei_version = output[6]
assert ei_version == b'\x01', "Only ELF version 1 is supported"
e_shoff = elf_read_le(output, 0x20, 4) # Start of section header table
e_shentsize = elf_read_le(output, 0x2e, 2) # Size of a section header table entry
e_shnum = elf_read_le(output, 0x30, 2) # Number of entries in the section header table
e_shstrndx = elf_read_le(output, 0x32, 2) # Index of the section header table entry that contains the section names
assert (e_shoff + (e_shnum * e_shentsize)) <= elfSize, "Section header runs past end of file"
assert e_shstrndx <= e_shnum, "Section name index > number of sections"
# Read in all the sections in the table
sections = []
for i in range(0, e_shnum):
sh_name = elf_read_le(output, (e_shoff + (i * e_shentsize) + 0), 4)
sh_offset = elf_read_le(output, (e_shoff + (i * e_shentsize) + 16), 4)
sh_size = elf_read_le(output, (e_shoff + (i * e_shentsize) + 20), 4)
assert (sh_offset + sh_size) <= elfSize, "Section data runs past end of file"
s = sectionTuple(name_offset = sh_name, name = "", offset = sh_offset, size = sh_size)
sections.append(s)
# Lookup the section names
for i in range(0, e_shnum):
s = sectionTuple(name_offset = sections[i].name_offset, \
name = output[(sections[e_shstrndx].offset + sections[i].name_offset):].partition(b'\x00')[0], \
offset = sections[i].offset, \
size = sections[i].size)
sections[i] = s
# Find the section we want to update
sectionIndex = -1
for i in range(0, e_shnum):
if sections[i].name == sectionName:
sectionIndex = i
assert sectionIndex >= 0, "Section %s not found in ELF" % sectionName
assert len(sectionData) == sections[sectionIndex].size, "Size of signature data file (%d) doesn't match size of section (%d)" % (len(sectionData), sections[sectionIndex].size)
# Replace the ELF section with the new content
output = output[0:sections[sectionIndex].offset] + \
sectionData + \
output[(sections[sectionIndex].offset + sections[sectionIndex].size):]
elf = open(elfPath, "wb")
elf.write(output)
    elf.close()
# Dump an integer as a byte array, in the big endian format used by micro-ecc
def int_to_bytearray(val, length):
res = bytearray(length)
for i in range(0, length):
res[length - (1 + i)] = (val & 0xff)
val = (val & ~0xff) >> 8
assert val == 0, "Dumped int to C array, but length %i not big enough" % length
return res
def main(argv):
elfPath = sys.argv[1] # Path to the ELF we want to sign
signingKey = sys.argv[2] # Key to use (either a service URL or a local key path)
sectionName = sys.argv[3] # ELF section to replace with signature
elf2binCmd = sys.argv[4] # Command for converting ELF to in-memory binary representation
# Generate a tempfile that we can dump the binary to. Objdump cannot dump to a pipe.
    tempBinFile = tempfile.NamedTemporaryFile()
elf2binCmdline = elf2binCmd + " " + elfPath + " " + tempBinFile.name
if 'debug' in globals():
print "Signing %s, section '%s' using %s" % (elfPath, sectionName, signingKey)
print "Generating bin using '%s'" % elf2binCmdline
# Generate the binary that we sign (the provided command removes the signature placeholder section)
    os.system(elf2binCmdline)
# Compute the SHA-256 hash of the image we are signing
    h = open(tempBinFile.name, "rb")
hash = binascii.hexlify(hashlib.sha256(h.read()).digest())
h.close()
# Dump out the length and hash of the signed image
if 'debug' in globals():
print "Signed length = %d bytes" % os.stat(tempBinFile.name).st_size
print "Image SHA-256 = %s" % hash
# If the signingKey looks like a URL, we do online signing; otherwise, use a locally stored key
if signingKey.startswith('https://'):
# Append the hash to the URL
signingKey = signingKey + "&hash=" + hash
if 'debug' in globals():
print "Signing using remote service URL: %s" % signingKey
# Get the auth parameter that should have been exported from the environment
assert 'auth' in os.environ, "Signing service credentials 'auth' not exported from environment"
# Use cURL to request signing by the service
buffer = StringIO.StringIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, signingKey)
if 'allowSelfSignedTLSCerts' in globals():
curl.setopt(pycurl.SSL_VERIFYPEER, False)
curl.setopt(pycurl.FAILONERROR, True)
curl.setopt(pycurl.WRITEDATA, buffer)
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, os.environ['auth'])
try:
curl.perform()
except pycurl.error as e:
            # Handle HTTP error return codes using the assert below, to make it easier to diagnose issues
            if e.args[0] != pycurl.E_HTTP_RETURNED_ERROR:
raise e
http_code = curl.getinfo(pycurl.HTTP_CODE)
assert http_code == 200, "HTTP error %d returned by service" % http_code
curl.close()
signature = buffer.getvalue()
assert len(signature) == (2 * 60), "Signature returned by service has wrong length (%d != %d)" % (len(signature), (2 * 60))
if 'debug' in globals():
print "Service returned signature: %s" % signature
sig = bytearray(binascii.unhexlify(signature))
else:
if 'debug' in globals():
print "Signing using locally stored key"
# Sign the binary, currently using a local key and OpenSSL.
process = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", signingKey, tempBinFile.name], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
# Extract the signature values from the DER output
res = pyasn1.codec.ber.decoder.decode(stdout)[0]
assert len(res) == 2, "Expected 2 values in generated EC signature, got %d" % len(res)
assert isinstance(res.getComponentByPosition(0), pyasn1.type.univ.Integer), "EC signature result values weren't integers"
assert isinstance(res.getComponentByPosition(1), pyasn1.type.univ.Integer), "EC signature result values weren't integers"
r = int(res.getComponentByPosition(0))
s = int(res.getComponentByPosition(1))
# Generate the signature block.
# The size of this signature block needs to match the size of the signature
# placeholder section that was set aside in the linker script.
# The signature data (r,s) also needs to be 4-byte aligned.
sig = SIGNATURE_ECDSA_SHA256_SECP224R1 + \
b'\x00\x00\x00' + \
int_to_bytearray(r, (224/8)) + \
int_to_bytearray(s, (224/8))
# Dump out the r,s values
if 'debug' in globals():
c_array("signature_r", int_to_bytearray(r, (224/8)))
c_array("signature_s", int_to_bytearray(s, (224/8)))
# Dump out the complete generated signature
if 'debug' in globals():
c_array("signature", sig)
# Update the ELF section with the generated signature data
elf_update_section(elfPath, sectionName, sig)
tempBinFile.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
|
import csv
from datetime import datetime, date
import json
import os
import re
import six
from urlparse import urlparse
from zipfile import ZipFile
from django.core.files.base import File
from django.core.files.temp import NamedTemporaryFile
from django.core.files.storage import get_storage_class
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from openpyxl.date_time import SharedDate
from openpyxl.workbook import Workbook
from pyxform.question import Question
from pyxform.section import Section, RepeatingSection
from savReaderWriter import SavWriter
from json2xlsclient.client import Client
from onadata.apps.logger.models import Attachment, Instance, XForm
from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.viewer.models.export import Export
from onadata.apps.viewer.models.parsed_instance import\
_is_invalid_for_mongo, _encode_for_mongo, _decode_from_mongo,\
ParsedInstance
from onadata.libs.utils.viewer_tools import create_attachments_zipfile,\
image_urls
from onadata.libs.utils.common_tags import (
ID, XFORM_ID_STRING, STATUS, ATTACHMENTS, GEOLOCATION, BAMBOO_DATASET_ID,
DELETEDAT, INDEX, PARENT_INDEX, PARENT_TABLE_NAME,
SUBMISSION_TIME, UUID, TAGS, NOTES, VERSION, SUBMITTED_BY, DURATION)
from onadata.libs.exceptions import J2XException, NoRecordsFoundError
from onadata.libs.utils.osm import get_combined_osm
QUESTION_TYPES_TO_EXCLUDE = [
u'note',
]
# the bind type of select multiples that we use to compare
MULTIPLE_SELECT_BIND_TYPE = u"select"
GEOPOINT_BIND_TYPE = u"geopoint"
def encode_if_str(row, key, encode_dates=False):
val = row.get(key)
if isinstance(val, six.string_types):
return val.encode('utf-8')
if encode_dates and isinstance(val, datetime):
try:
return val.strftime('%Y-%m-%dT%H:%M:%S%z').encode('utf-8')
except ValueError:
raise Exception(u"%s has an invalid datetime format" % (val))
if encode_dates and isinstance(val, date):
try:
return val.strftime('%Y-%m-%d').encode('utf-8')
except ValueError:
raise Exception(u"%s has an invalid date format" % (val))
return val
def question_types_to_exclude(_type):
return _type in QUESTION_TYPES_TO_EXCLUDE
def str_to_bool(s):
if s in ['True', 'true', 'TRUE']:
return True
else:
return False
class DictOrganizer(object):
def set_dict_iterator(self, dict_iterator):
self._dict_iterator = dict_iterator
# Every section will get its own table
# I need to think of an easy way to flatten out a dictionary
# parent name, index, table name, data
def _build_obs_from_dict(self, d, obs, table_name,
parent_table_name, parent_index):
if table_name not in obs:
obs[table_name] = []
this_index = len(obs[table_name])
obs[table_name].append({
u"_parent_table_name": parent_table_name,
u"_parent_index": parent_index,
})
for k, v in d.items():
if type(v) != dict and type(v) != list:
assert k not in obs[table_name][-1]
obs[table_name][-1][k] = v
obs[table_name][-1][u"_index"] = this_index
for k, v in d.items():
if type(v) == dict:
kwargs = {
"d": v,
"obs": obs,
"table_name": k,
"parent_table_name": table_name,
"parent_index": this_index
}
self._build_obs_from_dict(**kwargs)
if type(v) == list:
for i, item in enumerate(v):
kwargs = {
"d": item,
"obs": obs,
"table_name": k,
"parent_table_name": table_name,
"parent_index": this_index,
}
self._build_obs_from_dict(**kwargs)
return obs
def get_observation_from_dict(self, d):
result = {}
assert len(d.keys()) == 1
root_name = d.keys()[0]
kwargs = {
"d": d[root_name],
"obs": result,
"table_name": root_name,
"parent_table_name": u"",
"parent_index": -1,
}
self._build_obs_from_dict(**kwargs)
return result
def dict_to_joined_export(data, index, indices, name):
"""
Converts a dict into one or more tabular datasets
"""
output = {}
# TODO: test for _geolocation and attachment lists
if isinstance(data, dict):
for key, val in data.iteritems():
if isinstance(val, list) and key not in [NOTES, TAGS]:
output[key] = []
for child in val:
if key not in indices:
indices[key] = 0
indices[key] += 1
child_index = indices[key]
new_output = dict_to_joined_export(
child, child_index, indices, key)
d = {INDEX: child_index, PARENT_INDEX: index,
PARENT_TABLE_NAME: name}
# iterate over keys within new_output and append to
# main output
for out_key, out_val in new_output.iteritems():
if isinstance(out_val, list):
if out_key not in output:
output[out_key] = []
output[out_key].extend(out_val)
else:
d.update(out_val)
output[key].append(d)
else:
if name not in output:
output[name] = {}
if key in [TAGS]:
output[name][key] = ",".join(val)
elif key in [NOTES]:
note_list = [v if isinstance(v, six.string_types)
else v['note'] for v in val]
output[name][key] = "\r\n".join(note_list)
else:
output[name][key] = val
return output
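def _example_dict_to_joined_export():
    # Illustrative sketch only (hypothetical survey structure): a repeat group
    # is flattened into its own child table whose rows carry INDEX,
    # PARENT_INDEX and PARENT_TABLE_NAME keys pointing back at the parent
    # 'survey' table.
    data = {
        'household': 'h1',
        'children': [{'name': 'a'}, {'name': 'b'}],
    }
    return dict_to_joined_export(data, 1, {}, 'survey')
    # => {'survey': {'household': 'h1'},
    #     'children': [{'name': 'a', INDEX: 1, PARENT_INDEX: 1,
    #                   PARENT_TABLE_NAME: 'survey'},
    #                  {'name': 'b', INDEX: 2, PARENT_INDEX: 1,
    #                   PARENT_TABLE_NAME: 'survey'}]}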
class ExportBuilder(object):
IGNORED_COLUMNS = [XFORM_ID_STRING, STATUS, ATTACHMENTS, GEOLOCATION,
BAMBOO_DATASET_ID, DELETEDAT]
# fields we export but are not within the form's structure
EXTRA_FIELDS = [ID, UUID, SUBMISSION_TIME, INDEX, PARENT_TABLE_NAME,
PARENT_INDEX, TAGS, NOTES, VERSION, DURATION, SUBMITTED_BY]
SPLIT_SELECT_MULTIPLES = True
BINARY_SELECT_MULTIPLES = False
# column group delimiters
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
TYPES_TO_CONVERT = ['int', 'decimal', 'date'] # , 'dateTime']
CONVERT_FUNCS = {
'int': lambda x: int(x),
'decimal': lambda x: float(x),
'date': lambda x: ExportBuilder.string_to_date_with_xls_validation(x),
'dateTime': lambda x: datetime.strptime(x[:19], '%Y-%m-%dT%H:%M:%S')
}
TRUNCATE_GROUP_TITLE = False
XLS_SHEET_NAME_MAX_CHARS = 31
@classmethod
def string_to_date_with_xls_validation(cls, date_str):
date_obj = datetime.strptime(date_str, '%Y-%m-%d').date()
try:
SharedDate().datetime_to_julian(date_obj)
except ValueError:
return date_str
else:
return date_obj
@classmethod
def format_field_title(cls, abbreviated_xpath, field_delimiter,
remove_group_name=False):
if field_delimiter != '/':
return field_delimiter.join(abbreviated_xpath.split('/'))
# Check if to truncate the group name prefix
if remove_group_name:
abbreviated_xpath_list = abbreviated_xpath.split(field_delimiter)
return abbreviated_xpath_list[len(abbreviated_xpath_list)-1]
else:
return abbreviated_xpath
def set_survey(self, survey):
# TODO resolve circular import
from onadata.apps.viewer.models.data_dictionary import\
DataDictionary
def build_sections(
current_section, survey_element, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter='/',
remove_group_name=False):
for child in survey_element.children:
current_section_name = current_section['name']
                # if a section, recurse
if isinstance(child, Section):
# if its repeating, build a new section
if isinstance(child, RepeatingSection):
# section_name in recursive call changes
section = {
'name': child.get_abbreviated_xpath(),
'elements': []}
self.sections.append(section)
build_sections(
section, child, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter,
remove_group_name)
else:
                        # it's a group, recurse using the same section
build_sections(
current_section, child, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter,
remove_group_name)
elif isinstance(child, Question) and child.bind.get(u"type")\
not in QUESTION_TYPES_TO_EXCLUDE:
# add to survey_sections
if isinstance(child, Question):
child_xpath = child.get_abbreviated_xpath()
current_section['elements'].append({
'title': ExportBuilder.format_field_title(
child.get_abbreviated_xpath(),
field_delimiter, remove_group_name),
'xpath': child_xpath,
'type': child.bind.get(u"type")
})
if _is_invalid_for_mongo(child_xpath):
if current_section_name not in encoded_fields:
encoded_fields[current_section_name] = {}
encoded_fields[current_section_name].update(
{child_xpath: _encode_for_mongo(child_xpath)})
# if its a select multiple, make columns out of its choices
if child.bind.get(u"type") == MULTIPLE_SELECT_BIND_TYPE\
and self.SPLIT_SELECT_MULTIPLES:
for c in child.children:
_xpath = c.get_abbreviated_xpath()
_title = ExportBuilder.format_field_title(
_xpath, field_delimiter, remove_group_name)
choice = {
'title': _title,
'xpath': _xpath,
'type': 'string'
}
if choice not in current_section['elements']:
current_section['elements'].append(choice)
_append_xpaths_to_section(
current_section_name, select_multiples,
child.get_abbreviated_xpath(),
[c.get_abbreviated_xpath()
for c in child.children])
# split gps fields within this section
if child.bind.get(u"type") == GEOPOINT_BIND_TYPE:
# add columns for geopoint components
xpaths = DataDictionary.get_additional_geopoint_xpaths(
child.get_abbreviated_xpath())
current_section['elements'].extend(
[
{
'title': ExportBuilder.format_field_title(
xpath, field_delimiter,
remove_group_name),
'xpath': xpath,
'type': 'decimal'
}
for xpath in xpaths
])
_append_xpaths_to_section(
current_section_name, gps_fields,
child.get_abbreviated_xpath(), xpaths)
def _append_xpaths_to_section(current_section_name, field_list, xpath,
xpaths):
if current_section_name not in field_list:
field_list[current_section_name] = {}
field_list[
current_section_name][xpath] = xpaths
self.survey = survey
self.select_multiples = {}
self.gps_fields = {}
self.encoded_fields = {}
main_section = {'name': survey.name, 'elements': []}
self.sections = [main_section]
build_sections(
main_section, self.survey, self.sections,
self.select_multiples, self.gps_fields, self.encoded_fields,
self.GROUP_DELIMITER, self.TRUNCATE_GROUP_TITLE)
def section_by_name(self, name):
matches = filter(lambda s: s['name'] == name, self.sections)
assert(len(matches) == 1)
return matches[0]
@classmethod
def split_select_multiples(cls, row, select_multiples):
# for each select_multiple, get the associated data and split it
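        # e.g. (hypothetical choices): a row {'fruits': 'apple mango'} with
        # choices ['fruits/apple', 'fruits/mango', 'fruits/banana'] gains the
        # keys {'fruits/apple': True, 'fruits/mango': True, 'fruits/banana': False}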
for xpath, choices in select_multiples.iteritems():
# get the data matching this xpath
data = row.get(xpath)
selections = []
if data:
selections = [
u'{0}/{1}'.format(
xpath, selection) for selection in data.split()]
if not cls.BINARY_SELECT_MULTIPLES:
row.update(dict(
[(choice, choice in selections if selections else None)
for choice in choices]))
else:
YES = 1
NO = 0
row.update(dict(
[(choice, YES if choice in selections else NO)
for choice in choices]))
return row
@classmethod
def split_gps_components(cls, row, gps_fields):
# for each gps_field, get associated data and split it
for xpath, gps_components in gps_fields.iteritems():
data = row.get(xpath)
if data:
gps_parts = data.split()
if len(gps_parts) > 0:
row.update(zip(gps_components, gps_parts))
return row
@classmethod
def decode_mongo_encoded_fields(cls, row, encoded_fields):
for xpath, encoded_xpath in encoded_fields.iteritems():
if row.get(encoded_xpath):
val = row.pop(encoded_xpath)
row.update({xpath: val})
return row
@classmethod
def decode_mongo_encoded_section_names(cls, data):
return dict([(_decode_from_mongo(k), v) for k, v in data.iteritems()])
@classmethod
def convert_type(cls, value, data_type):
"""
Convert data to its native type e.g. string '1' to int 1
@param value: the string value to convert
@param data_type: the native data type to convert to
@return: the converted value
"""
func = ExportBuilder.CONVERT_FUNCS.get(data_type, lambda x: x)
try:
return func(value)
except ValueError:
return value
def pre_process_row(self, row, section):
"""
Split select multiples, gps and decode . and $
"""
section_name = section['name']
# first decode fields so that subsequent lookups
# have decoded field names
if section_name in self.encoded_fields:
row = ExportBuilder.decode_mongo_encoded_fields(
row, self.encoded_fields[section_name])
if self.SPLIT_SELECT_MULTIPLES and\
section_name in self.select_multiples:
row = ExportBuilder.split_select_multiples(
row, self.select_multiples[section_name])
if section_name in self.gps_fields:
row = ExportBuilder.split_gps_components(
row, self.gps_fields[section_name])
# convert to native types
for elm in section['elements']:
            # only convert if it's in our list and it's not empty, just to
            # optimize
value = row.get(elm['xpath'])
if elm['type'] in ExportBuilder.TYPES_TO_CONVERT\
and value is not None and value != '':
row[elm['xpath']] = ExportBuilder.convert_type(
value, elm['type'])
return row
def to_zipped_csv(self, path, data, *args, **kwargs):
def write_row(row, csv_writer, fields):
csv_writer.writerow(
[encode_if_str(row, field) for field in fields])
csv_defs = {}
for section in self.sections:
csv_file = NamedTemporaryFile(suffix=".csv")
csv_writer = csv.writer(csv_file)
csv_defs[section['name']] = {
'csv_file': csv_file, 'csv_writer': csv_writer}
# write headers
for section in self.sections:
fields = [element['title'] for element in section['elements']]\
+ self.EXTRA_FIELDS
csv_defs[section['name']]['csv_writer'].writerow(
[f.encode('utf-8') for f in fields])
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
# decode mongo section names
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
# get data for this section and write to csv
section_name = section['name']
csv_def = csv_defs[section_name]
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
csv_writer = csv_def['csv_writer']
# section name might not exist within the output, e.g. data was
# not provided for said repeat - write test to check this
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
csv_writer, fields)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
csv_writer, fields)
index += 1
# write zipfile
with ZipFile(path, 'w') as zip_file:
for section_name, csv_def in csv_defs.iteritems():
csv_file = csv_def['csv_file']
csv_file.seek(0)
zip_file.write(
csv_file.name, "_".join(section_name.split("/")) + ".csv")
# close files when we are done
for section_name, csv_def in csv_defs.iteritems():
csv_def['csv_file'].close()
@classmethod
def get_valid_sheet_name(cls, desired_name, existing_names):
# a sheet name has to be <= 31 characters and not a duplicate of an
# existing sheet
        # truncate sheet_name to cls.XLS_SHEET_NAME_MAX_CHARS
        new_sheet_name = desired_name[:cls.XLS_SHEET_NAME_MAX_CHARS]
        # make sure it's unique within the list
i = 1
generated_name = new_sheet_name
while generated_name in existing_names:
digit_length = len(str(i))
allowed_name_len = cls.XLS_SHEET_NAME_MAX_CHARS - \
digit_length
# make name the required len
if len(generated_name) > allowed_name_len:
generated_name = generated_name[:allowed_name_len]
generated_name = "{0}{1}".format(generated_name, i)
i += 1
return generated_name
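    # Illustrative example: a 40-character section name is first cut to the
    # 31-character sheet-name limit; if that name already exists it is
    # re-truncated to leave room for a numeric suffix and '1', '2', ... is
    # appended until the result is unique.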
def to_xls_export(self, path, data, *args, **kwargs):
def write_row(data, work_sheet, fields, work_sheet_titles):
# update parent_table with the generated sheet's title
data[PARENT_TABLE_NAME] = work_sheet_titles.get(
data.get(PARENT_TABLE_NAME))
work_sheet.append([data.get(f) for f in fields])
wb = Workbook(optimized_write=True)
work_sheets = {}
# map of section_names to generated_names
work_sheet_titles = {}
for section in self.sections:
section_name = section['name']
work_sheet_title = ExportBuilder.get_valid_sheet_name(
"_".join(section_name.split("/")), work_sheet_titles.values())
work_sheet_titles[section_name] = work_sheet_title
work_sheets[section_name] = wb.create_sheet(
title=work_sheet_title)
# write the headers
for section in self.sections:
section_name = section['name']
headers = [
element['title'] for element in
section['elements']] + self.EXTRA_FIELDS
# get the worksheet
ws = work_sheets[section_name]
ws.append(headers)
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
# get data for this section and write to xls
section_name = section['name']
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
ws = work_sheets[section_name]
# section might not exist within the output, e.g. data was
# not provided for said repeat - write test to check this
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
ws, fields, work_sheet_titles)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
ws, fields, work_sheet_titles)
index += 1
wb.save(filename=path)
def to_flat_csv_export(
self, path, data, username, id_string, filter_query,
start=None, end=None):
# TODO resolve circular import
from onadata.libs.utils.csv_builder import CSVDataFrameBuilder
csv_builder = CSVDataFrameBuilder(
username, id_string, filter_query, self.GROUP_DELIMITER,
self.SPLIT_SELECT_MULTIPLES, self.BINARY_SELECT_MULTIPLES,
start, end, self.TRUNCATE_GROUP_TITLE
)
csv_builder.export_to(path)
def to_zipped_sav(self, path, data, *args, **kwargs):
        def write_row(row, sav_writer, fields):
            sav_writer.writerow(
                [encode_if_str(row, field, True) for field in fields])
sav_defs = {}
# write headers
for section in self.sections:
fields = [element['title'] for element in section['elements']]\
+ self.EXTRA_FIELDS
c = 0
var_labels = {}
var_names = []
tmp_k = {}
for field in fields:
c += 1
var_name = 'var%d' % c
var_labels[var_name] = field
var_names.append(var_name)
tmp_k[field] = var_name
var_types = dict(
[(tmp_k[element['title']],
0 if element['type'] in ['decimal', 'int'] else 255)
for element in section['elements']] +
[(tmp_k[item],
0 if item in ['_id', '_index', '_parent_index'] else 255)
for item in self.EXTRA_FIELDS]
)
sav_file = NamedTemporaryFile(suffix=".sav")
sav_writer = SavWriter(sav_file.name, varNames=var_names,
varTypes=var_types,
varLabels=var_labels, ioUtf8=True)
sav_defs[section['name']] = {
'sav_file': sav_file, 'sav_writer': sav_writer}
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
# decode mongo section names
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
                # get data for this section and write to sav
section_name = section['name']
sav_def = sav_defs[section_name]
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
sav_writer = sav_def['sav_writer']
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
sav_writer, fields)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
sav_writer, fields)
index += 1
for section_name, sav_def in sav_defs.iteritems():
sav_def['sav_writer'].closeSavFile(
sav_def['sav_writer'].fh, mode='wb')
# write zipfile
with ZipFile(path, 'w') as zip_file:
for section_name, sav_def in sav_defs.iteritems():
sav_file = sav_def['sav_file']
sav_file.seek(0)
zip_file.write(
sav_file.name, "_".join(section_name.split("/")) + ".sav")
# close files when we are done
for section_name, sav_def in sav_defs.iteritems():
sav_def['sav_file'].close()
def dict_to_flat_export(d, parent_index=0):
pass
def generate_export(export_type, extension, username, id_string,
export_id=None, filter_query=None, group_delimiter='/',
split_select_multiples=True,
binary_select_multiples=False, start=None, end=None,
remove_group_name=False):
"""
Create appropriate export object given the export type
"""
# TODO resolve circular import
from onadata.apps.viewer.models.export import Export
export_type_func_map = {
Export.XLS_EXPORT: 'to_xls_export',
Export.CSV_EXPORT: 'to_flat_csv_export',
Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
}
xform = XForm.objects.get(
user__username__iexact=username, id_string__iexact=id_string)
records = ParsedInstance.query_data(xform, query=filter_query,
start=start, end=end)
export_builder = ExportBuilder()
export_builder.TRUNCATE_GROUP_TITLE = remove_group_name
export_builder.GROUP_DELIMITER = group_delimiter
export_builder.SPLIT_SELECT_MULTIPLES = split_select_multiples
export_builder.BINARY_SELECT_MULTIPLES = binary_select_multiples
export_builder.set_survey(xform.data_dictionary().survey)
temp_file = NamedTemporaryFile(suffix=("." + extension))
# get the export function by export type
func = getattr(export_builder, export_type_func_map[export_type])
try:
func.__call__(
temp_file.name, records, username, id_string, filter_query,
start=start, end=end
)
except NoRecordsFoundError:
pass
# generate filename
basename = "%s_%s" % (
id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
# check filename is unique
while not Export.is_filename_unique(xform, filename):
filename = increment_index_in_filename(filename)
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
# TODO: if s3 storage, make private - how will we protect local storage??
storage = get_storage_class()()
# seek to the beginning as required by storage classes
temp_file.seek(0)
export_filename = storage.save(file_path, File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if export_id:
export = Export.objects.get(id=export_id)
else:
export = Export(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
# dont persist exports that have a filter
if filter_query is None and start is None and end is None:
export.save()
return export
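# Hypothetical usage sketch (names and values are illustrative only):
#   export = generate_export(Export.CSV_EXPORT, 'csv', 'bob', 'household_survey')
#   export.filename  # e.g. 'household_survey_2015_06_01_12_00_00.csv'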
def should_create_new_export(xform, export_type):
# TODO resolve circular import
from onadata.apps.viewer.models.export import Export
if Export.objects.filter(
xform=xform, export_type=export_type).count() == 0\
or Export.exports_outdated(xform, export_type=export_type):
return True
return False
def newset_export_for(xform, export_type):
"""
Make sure you check that an export exists before calling this,
it will a DoesNotExist exception otherwise
"""
# TODO resolve circular import
from onadata.apps.viewer.models.export import Export
return Export.objects.filter(xform=xform, export_type=export_type)\
.latest('created_on')
def increment_index_in_filename(filename):
"""
filename should be in the form file.ext or file-2.ext - we check for the
dash and index and increment appropriately
"""
# check for an index i.e. dash then number then dot extension
regex = re.compile(r"(.+?)\-(\d+)(\..+)")
match = regex.match(filename)
if match:
basename = match.groups()[0]
index = int(match.groups()[1]) + 1
ext = match.groups()[2]
else:
index = 1
# split filename from ext
basename, ext = os.path.splitext(filename)
new_filename = "%s-%d%s" % (basename, index, ext)
return new_filename
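# Illustrative examples: 'export.csv' -> 'export-1.csv' and
# 'export-2.csv' -> 'export-3.csv'.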
def generate_attachments_zip_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
# TODO resolve circular import
from onadata.apps.viewer.models.export import Export
xform = XForm.objects.get(user__username=username, id_string=id_string)
attachments = Attachment.objects.filter(instance__xform=xform)
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
storage = get_storage_class()()
zip_file = None
try:
zip_file = create_attachments_zipfile(attachments)
try:
            temp_file = open(zip_file.name, 'rb')
export_filename = storage.save(
file_path,
File(temp_file, file_path))
finally:
temp_file.close()
finally:
zip_file and zip_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if(export_id):
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def generate_kml_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
# TODO resolve circular import
from onadata.apps.viewer.models.export import Export
user = User.objects.get(username=username)
xform = XForm.objects.get(user__username=username, id_string=id_string)
response = render_to_response(
'survey.kml', {'data': kml_export_data(id_string, user)})
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
storage = get_storage_class()()
temp_file = NamedTemporaryFile(suffix=extension)
temp_file.write(response.content)
temp_file.seek(0)
export_filename = storage.save(
file_path,
File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if(export_id):
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def kml_export_data(id_string, user):
# TODO resolve circular import
from onadata.apps.viewer.models.data_dictionary import DataDictionary
dd = DataDictionary.objects.get(id_string=id_string, user=user)
instances = Instance.objects.filter(
xform__user=user, xform__id_string=id_string, geom__isnull=False
).order_by('id')
data_for_template = []
labels = {}
def cached_get_labels(xpath):
if xpath in labels.keys():
return labels[xpath]
labels[xpath] = dd.get_label(xpath)
return labels[xpath]
for instance in instances:
# read the survey instances
data_for_display = instance.get_dict()
xpaths = data_for_display.keys()
xpaths.sort(cmp=instance.xform.data_dictionary().get_xpath_cmp())
label_value_pairs = [
(cached_get_labels(xpath), data_for_display[xpath]) for xpath in
xpaths if not xpath.startswith(u"_")]
table_rows = ['<tr><td>%s</td><td>%s</td></tr>' % (k, v) for k, v
in label_value_pairs]
img_urls = image_urls(instance)
img_url = img_urls[0] if img_urls else ""
point = instance.point
if point:
data_for_template.append({
'name': id_string,
'id': instance.id,
'lat': point.y,
'lng': point.x,
'image_urls': img_urls,
'table': '<table border="1"><a href="#"><img width="210" '
'class="thumbnail" src="%s" alt=""></a>%s'
'</table>' % (img_url, ''.join(table_rows))})
return data_for_template
def generate_osm_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
# TODO resolve circular import
from onadata.apps.viewer.models.export import Export
xform = XForm.objects.get(user__username=username, id_string=id_string)
attachments = Attachment.objects.filter(
extension=Attachment.OSM,
instance__xform=xform
)
content = get_combined_osm([a.media_file for a in attachments])
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
storage = get_storage_class()()
temp_file = NamedTemporaryFile(suffix=extension)
temp_file.write(content)
temp_file.seek(0)
export_filename = storage.save(
file_path,
File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if(export_id):
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def _get_records(instances):
return [clean_keys_of_slashes(instance)
for instance in instances]
def clean_keys_of_slashes(record):
"""
Replaces the slashes found in a dataset keys with underscores
:param record: list containing a couple of dictionaries
:return: record with keys without slashes
"""
for key in record.keys():
value = record[key]
if '/' in key:
# replace with _
record[key.replace('/', '_')]\
= record.pop(key)
# Check if the value is a list containing nested dict and apply same
if value:
if isinstance(value, list) and isinstance(value[0], dict):
for v in value:
clean_keys_of_slashes(v)
return record
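# Illustrative example: {'group/name': 'x', 'kids': [{'a/b': 1}]} becomes
# {'group_name': 'x', 'kids': [{'a_b': 1}]}.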
def _get_server_from_metadata(xform, meta, token):
report_templates = MetaData.external_export(xform)
if meta:
try:
int(meta)
except ValueError:
raise Exception(u"Invalid metadata pk {0}".format(meta))
# Get the external server from the metadata
result = report_templates.get(pk=meta)
server = result.external_export_url
name = result.external_export_name
elif token:
server = token
name = None
else:
# Take the latest value in the metadata
if not report_templates:
raise Exception(
u"Could not find the template token: Please upload template.")
server = report_templates[0].external_export_url
name = report_templates[0].external_export_name
return server, name
def generate_external_export(
export_type, username, id_string, export_id=None, token=None,
filter_query=None, meta=None, data_id=None):
xform = XForm.objects.get(
user__username__iexact=username, id_string__iexact=id_string)
user = User.objects.get(username=username)
server, name = _get_server_from_metadata(xform, meta, token)
# dissect the url
parsed_url = urlparse(server)
token = parsed_url.path[5:]
ser = parsed_url.scheme + '://' + parsed_url.netloc
# Get single submission data
if data_id:
inst = Instance.objects.filter(xform__user=user,
xform__id_string=id_string,
deleted_at=None,
pk=data_id)
instances = [inst[0].get_dict() if inst else {}]
else:
instances = ParsedInstance.query_data(xform, query=filter_query)
records = _get_records(instances)
status_code = 0
if records and server:
try:
client = Client(ser)
response = client.xls.create(token, json.dumps(records))
if hasattr(client.xls.conn, 'last_response'):
status_code = client.xls.conn.last_response.status_code
except Exception as e:
raise J2XException(
u"J2X client could not generate report. Server -> {0},"
u" Error-> {1}".format(server, e)
)
else:
if not server:
raise J2XException(u"External server not set")
elif not records:
raise J2XException(
u"No record to export. Form -> {0}".format(id_string)
)
# get or create export object
if export_id:
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.export_url = response
if status_code == 201:
export.internal_status = Export.SUCCESSFUL
export.filename = name + '-' + response[5:] if name else response[5:]
export.export_url = ser + response
else:
export.internal_status = Export.FAILED
export.save()
return export
def upload_template_for_external_export(server, file_obj):
try:
client = Client(server)
response = client.template.create(template_file=file_obj)
if hasattr(client.template.conn, 'last_response'):
status_code = client.template.conn.last_response.status_code
except Exception as e:
response = str(e)
status_code = 500
return str(status_code) + '|' + response
import sys
import datetime
import logging
import json
import subprocess
import socket
import time
import os
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from twisted.enterprise import adbapi
from collections import defaultdict, deque
from zope.interface import implements
from decimal import Decimal
from lbryum import SimpleConfig, Network
from lbryum.lbrycrd import COIN
import lbryum.wallet
from lbryum.commands import known_commands, Commands
from lbryum.transaction import Transaction
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, IWallet
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.Error import UnknownNameError, InvalidStreamInfoError, RequestCanceledError
from lbrynet.core.Error import InsufficientFundsError
from lbrynet.metadata.Metadata import Metadata
log = logging.getLogger(__name__)
alert = logging.getLogger("lbryalert." + __name__)
class ReservedPoints(object):
def __init__(self, identifier, amount):
self.identifier = identifier
self.amount = amount
def _catch_connection_error(f):
def w(*args):
try:
return f(*args)
except socket.error:
raise ValueError("Unable to connect to an lbrycrd server. Make sure an lbrycrd server " +
"is running and that this application can connect to it.")
return w
class Wallet(object):
"""This class implements the Wallet interface for the LBRYcrd payment system"""
implements(IWallet)
_FIRST_RUN_UNKNOWN = 0
_FIRST_RUN_YES = 1
_FIRST_RUN_NO = 2
def __init__(self, db_dir):
self.db_dir = db_dir
self.db = None
self.next_manage_call = None
self.wallet_balance = Decimal(0.0)
self.total_reserved_points = Decimal(0.0)
self.peer_addresses = {} # {Peer: string}
self.queued_payments = defaultdict(Decimal) # {address(string): amount(Decimal)}
self.expected_balances = defaultdict(Decimal) # {address(string): amount(Decimal)}
self.current_address_given_to_peer = {} # {Peer: address(string)}
self.expected_balance_at_time = deque() # (Peer, address(string), amount(Decimal), time(datetime), count(int),
# incremental_amount(float))
self.max_expected_payment_time = datetime.timedelta(minutes=3)
self.stopped = True
self.is_lagging = None
self.manage_running = False
self._manage_count = 0
self._balance_refresh_time = 3
self._batch_count = 20
self._first_run = self._FIRST_RUN_UNKNOWN
def start(self):
def start_manage():
self.stopped = False
self.manage()
return True
d = self._open_db()
d.addCallback(lambda _: self._clean_bad_records())
d.addCallback(lambda _: self._start())
d.addCallback(lambda _: start_manage())
return d
@staticmethod
def log_stop_error(err):
log.error("An error occurred stopping the wallet: %s", err.getTraceback())
def stop(self):
self.stopped = True
# If self.next_manage_call is None, then manage is currently running or else
# start has not been called, so set stopped and do nothing else.
if self.next_manage_call is not None:
self.next_manage_call.cancel()
self.next_manage_call = None
d = self.manage(do_full=True)
d.addErrback(self.log_stop_error)
d.addCallback(lambda _: self._stop())
d.addErrback(self.log_stop_error)
return d
def manage(self, do_full=False):
self.next_manage_call = None
have_set_manage_running = [False]
self._manage_count += 1
if self._manage_count % self._batch_count == 0:
self._manage_count = 0
do_full = True
def check_if_manage_running():
d = defer.Deferred()
def fire_if_not_running():
if self.manage_running is False:
self.manage_running = True
have_set_manage_running[0] = True
d.callback(True)
elif do_full is False:
d.callback(False)
else:
task.deferLater(reactor, 1, fire_if_not_running)
fire_if_not_running()
return d
d = check_if_manage_running()
def do_manage():
if do_full:
d = self._check_expected_balances()
d.addCallback(lambda _: self._send_payments())
else:
d = defer.succeed(True)
d.addCallback(lambda _: self.get_balance())
def set_wallet_balance(balance):
if self.wallet_balance != balance:
log.info("Got a new balance: %s", str(balance))
self.wallet_balance = balance
d.addCallback(set_wallet_balance)
return d
d.addCallback(lambda should_run: do_manage() if should_run else None)
def set_next_manage_call():
if not self.stopped:
self.next_manage_call = reactor.callLater(self._balance_refresh_time, self.manage)
d.addCallback(lambda _: set_next_manage_call())
def log_error(err):
log.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
return err
d.addErrback(log_error)
def set_manage_not_running(arg):
if have_set_manage_running[0] is True:
self.manage_running = False
return arg
d.addBoth(set_manage_not_running)
return d
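    # Note: every _batch_count-th call (and any call made with do_full=True)
    # also checks expected balances and flushes queued payments before
    # refreshing the cached wallet balance; light calls only refresh the
    # balance.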
def get_info_exchanger(self):
return LBRYcrdAddressRequester(self)
def get_wallet_info_query_handler_factory(self):
return LBRYcrdAddressQueryHandlerFactory(self)
def reserve_points(self, identifier, amount):
"""
Ensure a certain amount of points are available to be sent as payment, before the service is rendered
@param identifier: The peer to which the payment will ultimately be sent
@param amount: The amount of points to reserve
@return: A ReservedPoints object which is given to send_points once the service has been rendered
"""
rounded_amount = Decimal(str(round(amount, 8)))
#if peer in self.peer_addresses:
if self.wallet_balance >= self.total_reserved_points + rounded_amount:
self.total_reserved_points += rounded_amount
return ReservedPoints(identifier, rounded_amount)
return None
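    # Illustrative flow: points = reserve_points(peer, 1.0) returns a
    # ReservedPoints object when the balance allows it (None otherwise);
    # the object is later passed to send_points(), send_points_to_address()
    # or cancel_point_reservation().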
def cancel_point_reservation(self, reserved_points):
"""
Return all of the points that were reserved previously for some ReservedPoints object
@param reserved_points: ReservedPoints previously returned by reserve_points
@return: None
"""
self.total_reserved_points -= reserved_points.amount
def send_points(self, reserved_points, amount):
"""
Schedule a payment to be sent to a peer
@param reserved_points: ReservedPoints object previously returned by reserve_points
@param amount: amount of points to actually send, must be less than or equal to the
amount reserved in reserved_points
@return: Deferred which fires when the payment has been scheduled
"""
rounded_amount = Decimal(str(round(amount, 8)))
peer = reserved_points.identifier
assert(rounded_amount <= reserved_points.amount)
assert(peer in self.peer_addresses)
self.queued_payments[self.peer_addresses[peer]] += rounded_amount
# make any unused points available
self.total_reserved_points -= (reserved_points.amount - rounded_amount)
log.info("ordering that %s points be sent to %s", str(rounded_amount),
str(self.peer_addresses[peer]))
peer.update_stats('points_sent', amount)
return defer.succeed(True)
def send_points_to_address(self, reserved_points, amount):
"""
Schedule a payment to be sent to an address
@param reserved_points: ReservedPoints object previously returned by reserve_points
        @param amount: amount of points to actually send. Must be less than or equal to the
                       amount reserved in reserved_points
@return: Deferred which fires when the payment has been scheduled
"""
rounded_amount = Decimal(str(round(amount, 8)))
address = reserved_points.identifier
assert(rounded_amount <= reserved_points.amount)
self.queued_payments[address] += rounded_amount
self.total_reserved_points -= (reserved_points.amount - rounded_amount)
log.info("Ordering that %s points be sent to %s", str(rounded_amount),
str(address))
return defer.succeed(True)
def add_expected_payment(self, peer, amount):
"""Increase the number of points expected to be paid by a peer"""
rounded_amount = Decimal(str(round(amount, 8)))
assert(peer in self.current_address_given_to_peer)
address = self.current_address_given_to_peer[peer]
log.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
self.expected_balances[address] += rounded_amount
expected_balance = self.expected_balances[address]
expected_time = datetime.datetime.now() + self.max_expected_payment_time
self.expected_balance_at_time.append((peer, address, expected_balance, expected_time, 0, amount))
peer.update_stats('expected_points', amount)
def update_peer_address(self, peer, address):
self.peer_addresses[peer] = address
def get_new_address_for_peer(self, peer):
def set_address_for_peer(address):
self.current_address_given_to_peer[peer] = address
return address
d = self.get_new_address()
d.addCallback(set_address_for_peer)
return d
def _send_payments(self):
payments_to_send = {}
for address, points in self.queued_payments.items():
if points > 0:
log.info("Should be sending %s points to %s", str(points), str(address))
payments_to_send[address] = points
self.total_reserved_points -= points
self.wallet_balance -= points
else:
log.info("Skipping dust")
del self.queued_payments[address]
if payments_to_send:
log.info("Creating a transaction with outputs %s", str(payments_to_send))
d = self._do_send_many(payments_to_send)
d.addCallback(lambda txid: log.debug("Sent transaction %s", txid))
return d
log.debug("There were no payments to send")
return defer.succeed(True)
def get_stream_info_for_name(self, name):
d = self._get_value_for_name(name)
d.addCallback(self._get_stream_info_from_value, name)
return d
def get_txid_for_name(self, name):
d = self._get_value_for_name(name)
d.addCallback(lambda r: None if 'txid' not in r else r['txid'])
return d
def get_stream_info_from_txid(self, name, txid):
d = self.get_claims_from_tx(txid)
def get_claim_for_name(claims):
for claim in claims:
if claim['name'] == name:
claim['txid'] = txid
return claim
return Failure(UnknownNameError(name))
d.addCallback(get_claim_for_name)
d.addCallback(self._get_stream_info_from_value, name)
return d
def _get_stream_info_from_value(self, result, name):
def _check_result_fields(r):
for k in ['value', 'txid', 'n', 'height', 'amount']:
assert k in r, "getvalueforname response missing field %s" % k
def _log_success(claim_id):
log.info("lbry://%s complies with %s, claimid: %s", name, metadata.version, claim_id)
return defer.succeed(None)
if 'error' in result:
log.warning("Got an error looking up a name: %s", result['error'])
return Failure(UnknownNameError(name))
_check_result_fields(result)
try:
metadata = Metadata(json.loads(result['value']))
except (ValueError, TypeError):
return Failure(InvalidStreamInfoError(name))
txid = result['txid']
sd_hash = metadata['sources']['lbry_sd_hash']
d = self._save_name_metadata(name, txid, sd_hash)
d.addCallback(lambda _: self.get_claimid(name, txid))
d.addCallback(lambda cid: _log_success(cid))
d.addCallback(lambda _: metadata)
return d
def get_claim(self, name, claim_id):
d = self.get_claims_for_name(name)
d.addCallback(lambda claims: next(claim for claim in claims['claims'] if claim['claimId'] == claim_id))
return d
def get_claimid(self, name, txid):
def _get_id_for_return(claim_id):
if claim_id:
return defer.succeed(claim_id)
else:
d = self.get_claims_from_tx(txid)
d.addCallback(lambda claims: next(c['claimId'] for c in claims if c['name'] == name))
d.addCallback(lambda cid: self._update_claimid(cid, name, txid))
return d
d = self._get_claimid_for_tx(name, txid)
d.addCallback(_get_id_for_return)
return d
def get_my_claim(self, name):
def _convert_units(claim):
            amount = Decimal(claim['nEffectiveAmount']) / Decimal(COIN)
claim['nEffectiveAmount'] = amount
return claim
def _get_claim_for_return(claim):
if not claim:
return False
d = self.get_claim(name, claim['claim_id'])
d.addCallback(_convert_units)
d.addCallback(lambda clm: self._format_claim_for_return(name, clm, claim['txid']))
return d
def _get_my_unspent_claim(claims):
for claim in claims:
if claim['name'] == name and not claim['is spent']:
return claim
return False
d = self.get_name_claims()
d.addCallback(_get_my_unspent_claim)
d.addCallback(_get_claim_for_return)
return d
def get_claim_info(self, name, txid=None):
if not txid:
d = self._get_value_for_name(name)
d.addCallback(lambda r: self._get_claim_info(name, r['txid']))
else:
d = self._get_claim_info(name, txid)
d.addErrback(lambda _: False)
return d
def _format_claim_for_return(self, name, claim, txid, metadata=None, meta_version=None):
result = {}
result['claim_id'] = claim['claimId']
result['amount'] = claim['nEffectiveAmount']
result['height'] = claim['nHeight']
result['name'] = name
result['txid'] = txid
result['value'] = metadata if metadata else json.loads(claim['value'])
result['supports'] = [{'txid': support['txid'], 'n': support['n']} for support in claim['supports']]
result['meta_version'] = meta_version if meta_version else result['value'].get('ver', '0.0.1')
return result
def _get_claim_info(self, name, txid):
def _build_response(claim):
try:
metadata = Metadata(json.loads(claim['value']))
meta_ver = metadata.version
sd_hash = metadata['sources']['lbry_sd_hash']
d = self._save_name_metadata(name, txid, sd_hash)
except AssertionError:
metadata = claim['value']
meta_ver = "Non-compliant"
d = defer.succeed(None)
d.addCallback(lambda _: self._format_claim_for_return(name, claim, txid,
metadata=metadata, meta_version=meta_ver))
log.info("get claim info lbry://%s metadata: %s, claimid: %s", name, meta_ver, claim['claimId'])
return d
d = self.get_claimid(name, txid)
d.addCallback(lambda claim_id: self.get_claim(name, claim_id))
d.addCallback(_build_response)
return d
def get_claims_for_name(self, name):
d = self._get_claims_for_name(name)
return d
def update_metadata(self, new_metadata, old_metadata):
meta_for_return = old_metadata if isinstance(old_metadata, dict) else {}
for k in new_metadata:
meta_for_return[k] = new_metadata[k]
return defer.succeed(Metadata(meta_for_return))
def claim_name(self, name, bid, m):
def _save_metadata(txid, metadata):
log.info("Saving metadata for claim %s" % txid)
d = self._save_name_metadata(name, txid, metadata['sources']['lbry_sd_hash'])
d.addCallback(lambda _: txid)
return d
def _claim_or_update(claim, metadata, _bid):
if not claim:
log.info("No claim yet, making a new one")
return self._send_name_claim(name, metadata, _bid)
if not claim['is_mine']:
log.info("Making a contesting claim")
return self._send_name_claim(name, metadata, _bid)
else:
log.info("Updating over own claim")
d = self.update_metadata(metadata, claim['value'])
d.addCallback(lambda new_metadata: self._send_name_claim_update(name, claim['claim_id'], claim['txid'], new_metadata, _bid))
return d
meta = Metadata(m)
d = self.get_claim_info(name)
d.addCallback(lambda claim: _claim_or_update(claim, meta, bid))
d.addCallback(lambda txid: _save_metadata(txid, meta))
return d
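    # Note: claim_name() therefore makes a brand new claim when no claim
    # exists (or the existing one is not ours) and otherwise merges the new
    # metadata into our own claim via _send_name_claim_update().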
def abandon_name(self, txid):
d1 = self.get_new_address()
d2 = self.get_claims_from_tx(txid)
def get_txout_of_claim(claims):
for claim in claims:
if 'name' in claim and 'nOut' in claim:
return claim['nOut']
return defer.fail(ValueError("No claims in tx"))
def get_value_of_txout(nOut):
d = self._get_raw_tx(txid)
d.addCallback(self._get_decoded_tx)
d.addCallback(lambda tx: tx['vout'][nOut]['value'])
return d
d2.addCallback(get_txout_of_claim)
d2.addCallback(get_value_of_txout)
dl = defer.DeferredList([d1, d2], consumeErrors=True)
def abandon(results):
if results[0][0] and results[1][0]:
address = results[0][1]
amount = float(results[1][1])
return self._send_abandon(txid, address, amount)
elif results[0][0] is False:
return defer.fail(Failure(ValueError("Couldn't get a new address")))
else:
return results[1][1]
dl.addCallback(abandon)
return dl
def support_claim(self, name, claim_id, amount):
return self._support_claim(name, claim_id, amount)
def get_tx(self, txid):
d = self._get_raw_tx(txid)
d.addCallback(self._get_decoded_tx)
return d
def get_block_info(self, height):
d = self._get_blockhash(height)
return d
def get_history(self):
d = self._get_history()
return d
def address_is_mine(self, address):
d = self._address_is_mine(address)
return d
def get_tx_json(self, txid):
def _decode(raw_tx):
tx = Transaction(raw_tx).deserialize()
decoded_tx = {}
for txkey in tx.keys():
if isinstance(tx[txkey], list):
decoded_tx[txkey] = []
for i in tx[txkey]:
tmp = {}
for k in i.keys():
if isinstance(i[k], Decimal):
tmp[k] = float(i[k] / 1e8)
else:
tmp[k] = i[k]
decoded_tx[txkey].append(tmp)
else:
decoded_tx[txkey] = tx[txkey]
return decoded_tx
d = self._get_raw_tx(txid)
d.addCallback(_decode)
return d
def get_name_and_validity_for_sd_hash(self, sd_hash):
d = self._get_claim_metadata_for_sd_hash(sd_hash)
d.addCallback(lambda name_txid: self._get_status_of_claim(name_txid[1], name_txid[0], sd_hash) if name_txid is not None else None)
return d
def get_available_balance(self):
return float(self.wallet_balance - self.total_reserved_points)
def is_first_run(self):
if self._first_run == self._FIRST_RUN_UNKNOWN:
d = self._check_first_run()
def set_first_run(is_first):
self._first_run = self._FIRST_RUN_YES if is_first else self._FIRST_RUN_NO
d.addCallback(set_first_run)
else:
d = defer.succeed(self._FIRST_RUN_YES if self._first_run else self._FIRST_RUN_NO)
d.addCallback(lambda _: self._first_run == self._FIRST_RUN_YES)
return d
def _get_status_of_claim(self, txid, name, sd_hash):
d = self.get_claims_from_tx(txid)
def get_status(claims):
if claims is None:
claims = []
for claim in claims:
if 'in claim trie' in claim:
if 'name' in claim and str(claim['name']) == name and 'value' in claim:
try:
value_dict = json.loads(claim['value'])
except (ValueError, TypeError):
return None
claim_sd_hash = None
if 'stream_hash' in value_dict:
claim_sd_hash = str(value_dict['stream_hash'])
                        if 'sources' in value_dict and 'lbry_sd_hash' in value_dict['sources']:
                            claim_sd_hash = str(value_dict['sources']['lbry_sd_hash'])
if claim_sd_hash is not None and claim_sd_hash == sd_hash:
if 'is controlling' in claim and claim['is controlling']:
return name, "valid"
if claim['in claim trie']:
return name, "invalid"
if 'in queue' in claim and claim['in queue']:
return name, "pending"
return name, "unconfirmed"
return None
d.addCallback(get_status)
return d
def _check_expected_balances(self):
now = datetime.datetime.now()
balances_to_check = []
try:
while self.expected_balance_at_time[0][3] < now:
balances_to_check.append(self.expected_balance_at_time.popleft())
except IndexError:
pass
ds = []
for balance_to_check in balances_to_check:
log.info("Checking balance of address %s", str(balance_to_check[1]))
d = self._get_balance_for_address(balance_to_check[1])
d.addCallback(lambda bal: bal >= balance_to_check[2])
ds.append(d)
dl = defer.DeferredList(ds)
def handle_checks(results):
from future_builtins import zip
for balance, (success, result) in zip(balances_to_check, results):
peer = balance[0]
if success is True:
if result is False:
if balance[4] <= 1: # first or second strike, give them another chance
new_expected_balance = (balance[0],
balance[1],
balance[2],
datetime.datetime.now() + self.max_expected_payment_time,
balance[4] + 1,
balance[5])
self.expected_balance_at_time.append(new_expected_balance)
peer.update_score(-5.0)
else:
peer.update_score(-50.0)
else:
if balance[4] == 0:
peer.update_score(balance[5])
peer.update_stats('points_received', balance[5])
else:
log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
"expected balance: %s, expected time: %s, count: %s, error: %s",
str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
str(balance[4]), str(result.getErrorMessage()))
dl.addCallback(handle_checks)
return dl
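    # Note: an address that has not received its expected balance is given up
    # to two extensions (a -5.0 score penalty each time) before the peer is
    # hit with the larger -50.0 penalty.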
def _open_db(self):
self.db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blockchainname.db"),
check_same_thread=False)
def create_tables(transaction):
transaction.execute("create table if not exists name_metadata (" +
" name text, " +
" txid text, " +
" sd_hash text)")
transaction.execute("create table if not exists claim_ids (" +
" claimId text, " +
" name text, " +
" txid text)")
return self.db.runInteraction(create_tables)
def _clean_bad_records(self):
d = self.db.runQuery("delete from name_metadata where length(txid) > 64 or txid is null")
return d
def _save_name_metadata(self, name, txid, sd_hash):
assert len(txid) == 64, "That's not a txid: %s" % str(txid)
d = self.db.runQuery("delete from name_metadata where name=? and txid=? and sd_hash=?", (name, txid, sd_hash))
d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?)", (name, txid, sd_hash)))
return d
def _get_claim_metadata_for_sd_hash(self, sd_hash):
d = self.db.runQuery("select name, txid from name_metadata where sd_hash=?", (sd_hash,))
d.addCallback(lambda r: r[0] if r else None)
return d
def _update_claimid(self, claim_id, name, txid):
assert len(txid) == 64, "That's not a txid: %s" % str(txid)
d = self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=?", (claim_id, name, txid))
d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?)", (claim_id, name, txid)))
d.addCallback(lambda _: claim_id)
return d
def _get_claimid_for_tx(self, name, txid):
assert len(txid) == 64, "That's not a txid: %s" % str(txid)
d = self.db.runQuery("select claimId from claim_ids where name=? and txid=?", (name, txid))
d.addCallback(lambda r: r[0][0] if r else None)
return d
######### Must be overridden #########
def get_balance(self):
return defer.fail(NotImplementedError())
def get_new_address(self):
return defer.fail(NotImplementedError())
def get_block(self, blockhash):
return defer.fail(NotImplementedError())
def get_most_recent_blocktime(self):
return defer.fail(NotImplementedError())
def get_best_blockhash(self):
return defer.fail(NotImplementedError())
def get_name_claims(self):
return defer.fail(NotImplementedError())
def _get_claims_for_name(self, name):
return defer.fail(NotImplementedError())
def _check_first_run(self):
return defer.fail(NotImplementedError())
def _get_raw_tx(self, txid):
return defer.fail(NotImplementedError())
def _send_name_claim(self, name, val, amount):
return defer.fail(NotImplementedError())
def _get_decoded_tx(self, raw_tx):
return defer.fail(NotImplementedError())
def _send_abandon(self, txid, address, amount):
return defer.fail(NotImplementedError())
def _send_name_claim_update(self, name, claim_id, txid, value, amount):
return defer.fail(NotImplementedError())
def _support_claim(self, name, claim_id, amount):
return defer.fail(NotImplementedError())
def _do_send_many(self, payments_to_send):
return defer.fail(NotImplementedError())
def _get_value_for_name(self, name):
return defer.fail(NotImplementedError())
def get_claims_from_tx(self, txid):
return defer.fail(NotImplementedError())
def _get_balance_for_address(self, address):
return defer.fail(NotImplementedError())
def _get_history(self):
return defer.fail(NotImplementedError())
def _address_is_mine(self, address):
return defer.fail(NotImplementedError())
def _start(self):
pass
def _stop(self):
pass
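# Subclasses are expected to override the stubs above; LBRYcrdWallet (below)
# backs them with lbrycrdd JSON-RPC calls and LBRYumWallet backs them with an
# lbryum SimpleConfig/Network/Commands stack.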
class LBRYcrdWallet(Wallet):
def __init__(self, db_dir, wallet_dir=None, wallet_conf=None, lbrycrdd_path=None):
Wallet.__init__(self, db_dir)
self.started_lbrycrdd = False
self.wallet_dir = wallet_dir
self.wallet_conf = wallet_conf
self.lbrycrdd = None
self.lbrycrdd_path = lbrycrdd_path
settings = self._get_rpc_conf()
rpc_user = settings["username"]
rpc_pass = settings["password"]
rpc_port = settings["rpc_port"]
rpc_url = "127.0.0.1"
self.rpc_conn_string = "http://%s:%s@%s:%s" % (rpc_user, rpc_pass, rpc_url, str(rpc_port))
def _start(self):
return threads.deferToThread(self._make_connection)
def _stop(self):
if self.lbrycrdd_path is not None:
return self._stop_daemon()
def _make_connection(self):
alert.info("Connecting to lbrycrdd...")
if self.lbrycrdd_path is not None:
self._start_daemon()
self._get_info_rpc()
log.info("Connected!")
alert.info("Connected to lbrycrdd.")
def _get_rpc_conf(self):
settings = {"username": "rpcuser",
"password": "rpcpassword",
"rpc_port": 9245}
if self.wallet_conf and os.path.exists(self.wallet_conf):
conf = open(self.wallet_conf)
for l in conf:
if l.startswith("rpcuser="):
settings["username"] = l[8:].rstrip('\n')
if l.startswith("rpcpassword="):
settings["password"] = l[12:].rstrip('\n')
if l.startswith("rpcport="):
settings["rpc_port"] = int(l[8:].rstrip('\n'))
return settings
def _check_first_run(self):
d = self.get_balance()
d.addCallback(lambda bal: threads.deferToThread(self._get_num_addresses_rpc) if bal == 0 else 2)
d.addCallback(lambda num_addresses: True if num_addresses <= 1 else False)
return d
def get_new_address(self):
return threads.deferToThread(self._get_new_address_rpc)
def get_balance(self):
return threads.deferToThread(self._get_wallet_balance_rpc)
def get_most_recent_blocktime(self):
d = threads.deferToThread(self._get_best_blockhash_rpc)
d.addCallback(lambda blockhash: threads.deferToThread(self._get_block_rpc, blockhash))
d.addCallback(
lambda block: block['time'] if 'time' in block else Failure(ValueError("Could not get a block time")))
return d
def get_name_claims(self):
return threads.deferToThread(self._get_name_claims_rpc)
def get_block(self, blockhash):
return threads.deferToThread(self._get_block_rpc, blockhash)
def get_best_blockhash(self):
d = threads.deferToThread(self._get_blockchain_info_rpc)
d.addCallback(lambda blockchain_info: blockchain_info['bestblockhash'])
return d
def get_nametrie(self):
return threads.deferToThread(self._get_nametrie_rpc)
def start_miner(self):
d = threads.deferToThread(self._get_gen_status_rpc)
d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, True) if not status
else "Miner was already running")
return d
def stop_miner(self):
d = threads.deferToThread(self._get_gen_status_rpc)
d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, False) if status
else "Miner wasn't running")
return d
def get_miner_status(self):
return threads.deferToThread(self._get_gen_status_rpc)
def _get_balance_for_address(self, address):
return threads.deferToThread(self._get_balance_for_address_rpc, address)
def _do_send_many(self, payments_to_send):
outputs = {address: float(points) for address, points in payments_to_send.iteritems()}
return threads.deferToThread(self._do_send_many_rpc, outputs)
def _send_name_claim(self, name, value, amount):
return threads.deferToThread(self._send_name_claim_rpc, name, value, amount)
def _get_raw_tx(self, txid):
return threads.deferToThread(self._get_raw_tx_rpc, txid)
def _get_decoded_tx(self, raw_tx):
return threads.deferToThread(self._get_decoded_tx_rpc, raw_tx)
def _send_abandon(self, txid, address, amount):
return threads.deferToThread(self._send_abandon_rpc, txid, address, amount)
def _send_name_claim_update(self, name, claim_id, txid, value, amount):
return threads.deferToThread(self._update_name_rpc, txid, value, amount)
def _support_claim(self, name, claim_id, amount):
return threads.deferToThread(self._support_claim_rpc, name, claim_id, amount)
def _get_claims_for_name(self, name):
return threads.deferToThread(self._get_claims_for_name_rpc, name)
def get_claims_from_tx(self, txid):
return threads.deferToThread(self._get_claims_from_tx_rpc, txid)
def _get_blockhash(self, blockhash):
return threads.deferToThread(self._get_blockhash_rpc, blockhash)
def _get_value_for_name(self, name):
return threads.deferToThread(self._get_value_for_name_rpc, name)
def _get_history(self):
return threads.deferToThread(self._list_transactions_rpc)
def _address_is_mine(self, address):
return threads.deferToThread(self._get_address_is_mine_rpc, address)
def _get_rpc_conn(self):
return AuthServiceProxy(self.rpc_conn_string)
def _start_daemon(self):
tries = 0
try:
rpc_conn = self._get_rpc_conn()
try:
rpc_conn.getinfo()
except ValueError:
log.exception('Failed to get rpc info. Rethrowing with a hopefully more useful error message')
raise Exception('Failed to get rpc info from lbrycrdd. Try restarting lbrycrdd')
log.info("lbrycrdd was already running when LBRYcrdWallet was started.")
return
except (socket.error, JSONRPCException):
tries += 1
log.info("lbrcyrdd was not running when LBRYcrdWallet was started. Attempting to start it.")
try:
if os.name == "nt":
                si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = subprocess.SW_HIDE
self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
"-conf=%s" % self.wallet_conf], startupinfo=si)
else:
if sys.platform == 'darwin':
os.chdir("/Applications/LBRY.app/Contents/Resources")
self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
"-conf=%s" % self.wallet_conf])
self.started_lbrycrdd = True
except OSError:
import traceback
log.error("Couldn't launch lbrycrdd at path %s: %s", self.lbrycrdd_path, traceback.format_exc())
raise ValueError("Couldn't launch lbrycrdd. Tried %s" % self.lbrycrdd_path)
while tries < 6:
try:
rpc_conn = self._get_rpc_conn()
rpc_conn.getinfo()
break
except (socket.error, JSONRPCException):
tries += 1
log.warning("Failed to connect to lbrycrdd.")
if tries < 6:
time.sleep(2 ** tries)
log.warning("Trying again in %d seconds", 2 ** tries)
else:
log.warning("Giving up.")
else:
self.lbrycrdd.terminate()
raise ValueError("Couldn't open lbrycrdd")
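    # Note: the retry loop above doubles the wait between connection attempts
    # and, after the final failed attempt, terminates the daemon it spawned
    # and raises.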
def _stop_daemon(self):
if self.lbrycrdd is not None and self.started_lbrycrdd is True:
alert.info("Stopping lbrycrdd...")
d = threads.deferToThread(self._stop_rpc)
d.addCallback(lambda _: alert.info("Stopped lbrycrdd."))
return d
return defer.succeed(True)
@_catch_connection_error
def _get_balance_for_address_rpc(self, address):
rpc_conn = self._get_rpc_conn()
balance = rpc_conn.getreceivedbyaddress(address)
log.debug("received balance for %s: %s", str(address), str(balance))
return balance
@_catch_connection_error
def _do_send_many_rpc(self, payments):
rpc_conn = self._get_rpc_conn()
return rpc_conn.sendmany("", payments)
@_catch_connection_error
def _get_info_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getinfo()
@_catch_connection_error
def _get_name_claims_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.listnameclaims()
@_catch_connection_error
def _get_gen_status_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getgenerate()
@_catch_connection_error
def _set_gen_status_rpc(self, b):
if b:
log.info("Starting miner")
else:
log.info("Stopping miner")
rpc_conn = self._get_rpc_conn()
return rpc_conn.setgenerate(b)
@_catch_connection_error
def _get_raw_tx_rpc(self, txid):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getrawtransaction(txid)
@_catch_connection_error
def _get_decoded_tx_rpc(self, raw):
rpc_conn = self._get_rpc_conn()
return rpc_conn.decoderawtransaction(raw)
@_catch_connection_error
def _send_abandon_rpc(self, txid, address, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.abandonclaim(txid, address, amount)
@_catch_connection_error
def _get_blockchain_info_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getblockchaininfo()
@_catch_connection_error
def _get_block_rpc(self, blockhash):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getblock(blockhash)
@_catch_connection_error
def _get_blockhash_rpc(self, height):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getblockhash(height)
@_catch_connection_error
def _get_claims_from_tx_rpc(self, txid):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getclaimsfortx(txid)
@_catch_connection_error
def _get_claims_for_name_rpc(self, name):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getclaimsforname(name)
@_catch_connection_error
def _get_nametrie_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getclaimtrie()
@_catch_connection_error
def _get_wallet_balance_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getbalance("")
@_catch_connection_error
def _get_new_address_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getnewaddress()
@_catch_connection_error
def _get_value_for_name_rpc(self, name):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getvalueforname(name)
@_catch_connection_error
def _update_name_rpc(self, txid, value, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.updateclaim(txid, json.dumps(value), amount)
@_catch_connection_error
def _send_name_claim_rpc(self, name, value, amount):
rpc_conn = self._get_rpc_conn()
try:
return str(rpc_conn.claimname(name, json.dumps(value), amount))
except JSONRPCException as e:
if 'message' in e.error and e.error['message'] == "Insufficient funds":
raise InsufficientFundsError()
elif 'message' in e.error:
raise ValueError(e.error['message'])
@_catch_connection_error
def _support_claim_rpc(self, name, claim_id, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.supportclaim(name, claim_id, amount)
@_catch_connection_error
def _get_num_addresses_rpc(self):
rpc_conn = self._get_rpc_conn()
return len(rpc_conn.getaddressesbyaccount(""))
@_catch_connection_error
def _get_best_blockhash_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getbestblockhash()
@_catch_connection_error
def _list_transactions_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.listtransactions()
@_catch_connection_error
def _get_address_is_mine_rpc(self, address):
rpc_conn = self._get_rpc_conn()
return address in rpc_conn.getaddressesbyaccount("")
@_catch_connection_error
def _stop_rpc(self):
# check if our lbrycrdd is actually running, or if we connected to one that was already
# running and ours failed to start
if self.lbrycrdd.poll() is None:
rpc_conn = self._get_rpc_conn()
rpc_conn.stop()
self.lbrycrdd.wait()
class LBRYumWallet(Wallet):
def __init__(self, db_dir):
Wallet.__init__(self, db_dir)
self.config = None
self.network = None
self.wallet = None
self.cmd_runner = None
self.first_run = False
self.printed_retrieving_headers = False
self._start_check = None
self._catch_up_check = None
self._caught_up_counter = 0
self._lag_counter = 0
self.blocks_behind_alert = 0
self.catchup_progress = 0
self.max_behind = 0
def _start(self):
network_start_d = defer.Deferred()
def setup_network():
self.config = SimpleConfig({'auto_connect': True})
self.network = Network(self.config)
alert.info("Loading the wallet...")
return defer.succeed(self.network.start())
d = setup_network()
def check_started():
if self.network.is_connecting():
if not self.printed_retrieving_headers and self.network.blockchain.retrieving_headers:
alert.info("Running the wallet for the first time...this may take a moment.")
self.printed_retrieving_headers = True
return False
self._start_check.stop()
self._start_check = None
if self.network.is_connected():
network_start_d.callback(True)
else:
network_start_d.errback(ValueError("Failed to connect to network."))
self._start_check = task.LoopingCall(check_started)
d.addCallback(lambda _: self._start_check.start(.1))
d.addCallback(lambda _: network_start_d)
d.addCallback(lambda _: self._load_wallet())
d.addCallback(lambda _: self._get_cmd_runner())
return d
def _stop(self):
if self._start_check is not None:
self._start_check.stop()
self._start_check = None
if self._catch_up_check is not None:
self._catch_up_check.stop()
self._catch_up_check = None
d = defer.Deferred()
def check_stopped():
if self.network:
if self.network.is_connected():
return False
stop_check.stop()
self.network = None
d.callback(True)
if self.network:
self.network.stop()
stop_check = task.LoopingCall(check_stopped)
stop_check.start(.1)
return d
def _load_wallet(self):
def get_wallet():
path = self.config.get_wallet_path()
storage = lbryum.wallet.WalletStorage(path)
wallet = lbryum.wallet.Wallet(storage)
if not storage.file_exists:
self.first_run = True
seed = wallet.make_seed()
wallet.add_seed(seed, None)
wallet.create_master_keys(None)
wallet.create_main_account()
wallet.synchronize()
self.wallet = wallet
blockchain_caught_d = defer.Deferred()
def check_caught_up():
local_height = self.network.get_catchup_progress()
remote_height = self.network.get_server_height()
if remote_height != 0 and remote_height - local_height <= 5:
msg = ""
if self._caught_up_counter != 0:
msg += "All caught up. "
msg += "Wallet loaded."
alert.info(msg)
self._catch_up_check.stop()
self._catch_up_check = None
blockchain_caught_d.callback(True)
elif remote_height != 0:
past_blocks_behind = self.blocks_behind_alert
self.blocks_behind_alert = remote_height - local_height
if self.blocks_behind_alert < past_blocks_behind:
self._lag_counter = 0
self.is_lagging = False
else:
self._lag_counter += 1
if self._lag_counter >= 900:
self.is_lagging = True
if self.blocks_behind_alert > self.max_behind:
self.max_behind = self.blocks_behind_alert
self.catchup_progress = int(100 * (self.blocks_behind_alert / (5 + self.max_behind)))
if self._caught_up_counter == 0:
alert.info('Catching up with the blockchain...showing blocks left...')
if self._caught_up_counter % 30 == 0:
alert.info('%d...', (remote_height - local_height))
self._caught_up_counter += 1
self._catch_up_check = task.LoopingCall(check_caught_up)
d = threads.deferToThread(get_wallet)
d.addCallback(self._save_wallet)
d.addCallback(lambda _: self.wallet.start_threads(self.network))
d.addCallback(lambda _: self._catch_up_check.start(.1))
d.addCallback(lambda _: blockchain_caught_d)
return d
def _get_cmd_runner(self):
self.cmd_runner = Commands(self.config, self.wallet, self.network)
def get_balance(self):
cmd = known_commands['getbalance']
func = getattr(self.cmd_runner, cmd.name)
accounts = None
exclude_claimtrietx = True
d = threads.deferToThread(func, accounts, exclude_claimtrietx)
d.addCallback(lambda result: Decimal(result['confirmed']) + Decimal(result.get('unconfirmed', 0.0)))
return d
def get_new_address(self):
d = threads.deferToThread(self.wallet.create_new_address)
d.addCallback(self._save_wallet)
return d
def get_block(self, blockhash):
cmd = known_commands['getblock']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, blockhash)
def get_most_recent_blocktime(self):
header = self.network.get_header(self.network.get_local_height())
return defer.succeed(header['timestamp'])
def get_best_blockhash(self):
height = self.network.get_local_height()
d = threads.deferToThread(self.network.blockchain.read_header, height)
d.addCallback(lambda header: self.network.blockchain.hash_header(header))
return d
def _get_blockhash(self, height):
d = threads.deferToThread(self.network.blockchain.read_header, height)
d.addCallback(lambda header: self.network.blockchain.hash_header(header))
return d
def get_name_claims(self):
cmd = known_commands['getnameclaims']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func)
def _check_first_run(self):
return defer.succeed(self.first_run)
def _get_raw_tx(self, txid):
cmd = known_commands['gettransaction']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, txid)
def _send_name_claim(self, name, val, amount):
def send_claim(address):
cmd = known_commands['claimname']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, address, amount, name, json.dumps(val))
d = self.get_new_address()
d.addCallback(send_claim)
d.addCallback(self._broadcast_transaction)
return d
def _get_claims_for_name(self, name):
cmd = known_commands['getclaimsforname']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, name)
def _send_name_claim_update(self, name, claim_id, txid, value, amount):
def send_claim_update(address):
decoded_claim_id = claim_id.decode('hex')[::-1]
metadata = json.dumps(value)
log.info("updateclaim %s %s %f %s %s '%s'", txid, address, amount, name, decoded_claim_id.encode('hex'), metadata)
cmd = known_commands['updateclaim']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, txid, address, amount, name, decoded_claim_id, metadata)
d = self.get_new_address()
d.addCallback(send_claim_update)
d.addCallback(self._broadcast_transaction)
return d
def _get_decoded_tx(self, raw_tx):
tx = Transaction(raw_tx)
decoded_tx = {}
decoded_tx['vout'] = []
for output in tx.outputs():
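            # output[2] holds the amount in base currency units; dividing by COIN converts it to whole coins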
out = {}
out['value'] = Decimal(output[2]) / Decimal(COIN)
decoded_tx['vout'].append(out)
return decoded_tx
def _send_abandon(self, txid, address, amount):
log.info("Abandon %s %s %f" % (txid, address, amount))
cmd = known_commands['abandonclaim']
func = getattr(self.cmd_runner, cmd.name)
d = threads.deferToThread(func, txid, address, amount)
d.addCallback(self._broadcast_transaction)
return d
def _support_claim(self, name, claim_id, amount):
        def _send_support(address, amount, name, claim_id):
            cmd = known_commands['supportclaim']
            func = getattr(self.cmd_runner, cmd.name)
            return threads.deferToThread(func, address, amount, name, claim_id)
d = self.get_new_address()
d.addCallback(lambda address: _send_support(address, amount, name, claim_id))
d.addCallback(self._broadcast_transaction)
return d
def _broadcast_transaction(self, raw_tx):
def _log_tx(r):
log.info("Broadcast tx: %s", r)
return r
cmd = known_commands['broadcast']
func = getattr(self.cmd_runner, cmd.name)
d = threads.deferToThread(func, raw_tx)
d.addCallback(_log_tx)
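        # A successful broadcast returns the 64-character hex txid; any other response is treated as a rejection.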
d.addCallback(lambda r: r if len(r) == 64 else defer.fail(Exception("Transaction rejected")))
d.addCallback(self._save_wallet)
return d
def _do_send_many(self, payments_to_send):
log.warning("Doing send many. payments to send: %s", str(payments_to_send))
cmd = known_commands['paytomanyandsend']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, payments_to_send.iteritems())
def _get_value_for_name(self, name):
cmd = known_commands['getvalueforname']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, name)
def get_claims_from_tx(self, txid):
cmd = known_commands['getclaimsfromtx']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, txid)
def _get_balance_for_address(self, address):
return defer.succeed(Decimal(self.wallet.get_addr_received(address))/COIN)
def get_nametrie(self):
cmd = known_commands['getclaimtrie']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func)
def _get_history(self):
cmd = known_commands['history']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func)
def _address_is_mine(self, address):
cmd = known_commands['ismine']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, address)
def get_pub_keys(self, wallet):
cmd = known_commands['getpubkeys']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, wallet)
def _save_wallet(self, val):
d = threads.deferToThread(self.wallet.storage.write)
d.addCallback(lambda _: val)
return d
class LBRYcrdAddressRequester(object):
implements([IRequestCreator])
def __init__(self, wallet):
self.wallet = wallet
self._protocols = []
######### IRequestCreator #########
def send_next_request(self, peer, protocol):
        if protocol not in self._protocols:
r = ClientRequest({'lbrycrd_address': True}, 'lbrycrd_address')
d = protocol.add_request(r)
d.addCallback(self._handle_address_response, peer, r, protocol)
d.addErrback(self._request_failed, peer)
self._protocols.append(protocol)
return defer.succeed(True)
else:
return defer.succeed(False)
######### internal calls #########
def _handle_address_response(self, response_dict, peer, request, protocol):
assert request.response_identifier in response_dict, \
"Expected %s in dict but did not get it" % request.response_identifier
assert protocol in self._protocols, "Responding protocol is not in our list of protocols"
address = response_dict[request.response_identifier]
self.wallet.update_peer_address(peer, address)
def _request_failed(self, err, peer):
if not err.check(RequestCanceledError):
log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
err.getErrorMessage(), str(peer))
return err
class LBRYcrdAddressQueryHandlerFactory(object):
implements(IQueryHandlerFactory)
def __init__(self, wallet):
self.wallet = wallet
######### IQueryHandlerFactory #########
def build_query_handler(self):
q_h = LBRYcrdAddressQueryHandler(self.wallet)
return q_h
def get_primary_query_identifier(self):
return 'lbrycrd_address'
def get_description(self):
return "LBRYcrd Address - an address for receiving payments via LBRYcrd"
class LBRYcrdAddressQueryHandler(object):
implements(IQueryHandler)
def __init__(self, wallet):
self.wallet = wallet
self.query_identifiers = ['lbrycrd_address']
self.address = None
self.peer = None
######### IQueryHandler #########
def register_with_request_handler(self, request_handler, peer):
self.peer = peer
request_handler.register_query_handler(self, self.query_identifiers)
def handle_queries(self, queries):
def create_response(address):
self.address = address
fields = {'lbrycrd_address': address}
return fields
if self.query_identifiers[0] in queries:
d = self.wallet.get_new_address_for_peer(self.peer)
d.addCallback(create_response)
return d
if self.address is None:
log.warning("Expected a request for an address, but did not receive one")
return defer.fail(Failure(ValueError("Expected but did not receive an address request")))
else:
return defer.succeed({})
|
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import main
from mock import Mock
from tests.utests.voltha.core.flow_helpers import FlowHelpers
from voltha.core import logical_device_agent
from voltha.core.flow_decomposer import *
from voltha.core.logical_device_agent import LogicalDeviceAgent
from voltha.protos.device_pb2 import Device, Port
from voltha.protos.logical_device_pb2 import LogicalDevice, LogicalPort
from voltha.protos.openflow_13_pb2 import Flows, FlowGroups
class test_multipon_logical_device_agent(FlowHelpers):
def setup_mock_registry(self):
registry = Mock()
logical_device_agent.registry = registry
def setUp(self):
self.setup_mock_registry()
self.flows = Flows(items=[])
self.groups = FlowGroups(items=[])
self.ld_ports = [
LogicalPort(
id='0',
device_id='olt',
device_port_no=0,
root_port=True,
ofp_port=ofp.ofp_port(port_no=0)
),
LogicalPort(
id='101',
device_id='onu1',
device_port_no=0,
ofp_port=ofp.ofp_port(port_no=101)
),
LogicalPort(
id='201',
device_id='onu2',
device_port_no=0,
ofp_port=ofp.ofp_port(port_no=201)
)
]
self.devices = {
'olt': Device(
id='olt', root=True, parent_id='id'),
'onu1': Device(
id='onu1', parent_id='olt', parent_port_no=1, vlan=101),
'onu2': Device(
id='onu2', parent_id='olt', parent_port_no=2, vlan=201),
}
self.ports = {
'olt': [
Port(port_no=0, type=Port.ETHERNET_NNI, device_id='olt'),
Port(port_no=1, type=Port.PON_OLT, device_id='olt',
peers=[Port.PeerPort(device_id='onu1', port_no=1)]),
Port(port_no=2, type=Port.PON_OLT, device_id='olt',
peers=[Port.PeerPort(device_id='onu2', port_no=1)])
],
'onu1': [
Port(port_no=0, type=Port.ETHERNET_UNI, device_id='onu1'),
Port(port_no=1, type=Port.PON_ONU, device_id='onu1',
peers=[Port.PeerPort(device_id='olt', port_no=1)])
],
'onu2': [
Port(port_no=0, type=Port.ETHERNET_UNI, device_id='onu2'),
Port(port_no=1, type=Port.PON_ONU, device_id='onu2',
peers=[Port.PeerPort(device_id='olt', port_no=2)])
],
}
self.device_flows = {
'olt': Flows(),
'onu1': Flows(),
'onu2': Flows()
}
self.device_groups = {
'olt': FlowGroups(),
'onu1': FlowGroups(),
'onu2': FlowGroups()
}
self.ld = LogicalDevice(id='id', root_device_id='olt')
self.root_proxy = Mock()
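        # The mocked root proxy below answers three path shapes: '' (all devices),
        # '<device_id>' (a single device) and '<device_id>/ports' (its ports);
        # flow and flow_group updates are captured per device in the dicts above.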
def get_devices(path):
if path == '':
return self.devices.values()
if path.endswith('/ports'):
return self.ports[path[:-len('/ports')]]
elif path.find('/') == -1:
return self.devices[path]
else:
raise Exception(
'Nothing to yield for path /devices/{}'.format(path))
def update_devices(path, data):
if path.endswith('/flows'):
self.device_flows[path[:-len('/flows')]] = data
elif path.endswith('/flow_groups'):
self.device_groups[path[:-len('/flow_groups')]] = data
else:
raise NotImplementedError(
'not handling path /devices/{}'.format(path))
self.root_proxy.get = lambda p: \
get_devices(p[len('/devices/'):]) if p.startswith('/devices') \
else None
self.root_proxy.update = lambda p, d: \
update_devices(p[len('/devices/'):], d) \
if p.startswith('/devices') \
else None
self.ld_proxy = Mock()
self.ld_proxy.get = lambda p: \
self.ld_ports if p == '/ports' else (
self.ld if p == '/' else None
)
self.flows_proxy = Mock()
self.flows_proxy.get = lambda _: self.flows # always '/' path
def update_flows(_, flows): # always '/' path
self.flows = flows
self.flows_proxy.update = update_flows
self.groups_proxy = Mock()
self.groups_proxy.get = lambda _: self.groups # always '/' path
def update_groups(_, groups): # always '/' path
self.groups = groups
self.groups_proxy.update = update_groups
self.core = Mock()
self.core.get_proxy = lambda path: \
self.root_proxy if path == '/' else (
self.ld_proxy if path.endswith('id') else (
self.flows_proxy if path.endswith('flows') else
self.groups_proxy
)
)
self.lda = LogicalDeviceAgent(self.core, self.ld)
def test_init(self):
pass # really just tests the setUp method
# ~~~~~~~~~~~~~~~~~~~~ DEFAULT RULES AND ROUTES ~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_default_rules(self):
rules = self.lda.get_all_default_rules()
# no default olt downstream and no default group for each of 3 devs
self.assertEqual(len(rules['olt'][0]), 0)
self.assertEqual(len(rules['olt'][1]), 0)
self.assertEqual(len(rules['onu1'][0]), 3)
self.assertEqual(len(rules['onu1'][1]), 0)
self.assertEqual(len(rules['onu2'][0]), 3)
self.assertEqual(len(rules['onu2'][1]), 0)
def test_routes(self):
self.lda.get_all_default_rules() # this will prepare the _routes
routes = self.lda._routes
self.assertEqual(len(routes), 4)
self.assertEqual(set(routes.keys()),
set([(0, 101), (0, 201), (101, 0), (201, 0)]))
# verify all routes
route = routes[(0, 101)]
self.assertEqual(len(route), 2)
self.assertEqual(route[0].device, self.devices['olt'])
self.assertEqual(route[0].ingress_port, self.ports['olt'][0])
self.assertEqual(route[0].egress_port, self.ports['olt'][1])
self.assertEqual(route[1].device, self.devices['onu1'])
self.assertEqual(route[1].ingress_port, self.ports['onu1'][1])
self.assertEqual(route[1].egress_port, self.ports['onu1'][0])
route = routes[(0, 201)]
self.assertEqual(len(route), 2)
self.assertEqual(route[0].device, self.devices['olt'])
self.assertEqual(route[0].ingress_port, self.ports['olt'][0])
self.assertEqual(route[0].egress_port, self.ports['olt'][2])
self.assertEqual(route[1].device, self.devices['onu2'])
self.assertEqual(route[1].ingress_port, self.ports['onu2'][1])
self.assertEqual(route[1].egress_port, self.ports['onu2'][0])
route = routes[(101, 0)]
self.assertEqual(len(route), 2)
self.assertEqual(route[0].device, self.devices['onu1'])
self.assertEqual(route[0].ingress_port, self.ports['onu1'][0])
self.assertEqual(route[0].egress_port, self.ports['onu1'][1])
self.assertEqual(route[1].device, self.devices['olt'])
self.assertEqual(route[1].ingress_port, self.ports['olt'][1])
self.assertEqual(route[1].egress_port, self.ports['olt'][0])
route = routes[(201, 0)]
self.assertEqual(len(route), 2)
self.assertEqual(route[0].device, self.devices['onu2'])
self.assertEqual(route[0].ingress_port, self.ports['onu2'][0])
self.assertEqual(route[0].egress_port, self.ports['onu2'][1])
self.assertEqual(route[1].device, self.devices['olt'])
self.assertEqual(route[1].ingress_port, self.ports['olt'][2])
self.assertEqual(route[1].egress_port, self.ports['olt'][0])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~ FLOW DECOMP TESTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_eapol_flow_decomp_case(self):
self.lda.update_flow_table(mk_simple_flow_mod(
priority=1000,
match_fields=[in_port(201), eth_type(0x888e)],
actions=[output(ofp.OFPP_CONTROLLER)],
meter_id=1,
metadata=32
))
self.lda._flow_table_updated(self.flows)
self.assertEqual(len(self.device_flows['olt'].items), 1)
self.assertEqual(len(self.device_flows['onu1'].items), 3)
self.assertEqual(len(self.device_flows['onu2'].items), 3)
self.assertEqual(len(self.device_groups['olt'].items), 0)
self.assertEqual(len(self.device_groups['onu1'].items), 0)
self.assertEqual(len(self.device_groups['onu2'].items), 0)
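        # 2147483645 (0x7ffffffd) is the numeric value of ofp.OFPP_CONTROLLER in
        # these protobuf bindings, matching the output(ofp.OFPP_CONTROLLER) action above.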
self.assertFlowsEqual(self.device_flows['olt'].items[0], mk_flow_stat(
priority=1000,
match_fields=[in_port(2), eth_type(0x888e)],
actions=[
output(2147483645)
],
meter_id=1,
metadata=32
))
def test_multicast_group_with_one_subscriber(self):
self.lda.update_group_table(mk_multicast_group_mod(
group_id=2,
buckets=[
ofp.ofp_bucket(actions=[
pop_vlan(),
output(201)
]),
]
))
self.lda.update_flow_table(mk_simple_flow_mod(
priority=1000,
match_fields=[
in_port(0),
eth_type(0x800),
vlan_vid(4096 + 140),
ipv4_dst(0xe60a0a0a)
],
actions=[group(2)]
))
self.lda._flow_table_updated(self.flows)
self.assertEqual(len(self.device_flows['olt'].items), 1)
self.assertEqual(len(self.device_flows['onu1'].items), 3)
self.assertEqual(len(self.device_flows['onu2'].items), 4)
self.assertEqual(len(self.device_groups['olt'].items), 0)
self.assertEqual(len(self.device_groups['onu1'].items), 0)
self.assertEqual(len(self.device_groups['onu2'].items), 0)
self.assertFlowsEqual(self.device_flows['olt'].items[0], mk_flow_stat(
priority=1000,
match_fields=[in_port(0), eth_type(0x800), vlan_vid(4096 + 140),
ipv4_dst(0xe60a0a0a)],
actions=[
pop_vlan(),
output(1)
]
))
self.assertFlowsEqual(self.device_flows['onu2'].items[3], mk_flow_stat(
priority=1000,
match_fields=[in_port(1), eth_type(0x800), ipv4_dst(0xe60a0a0a)],
actions=[
output(0)
]
))
def test_multicast_group_with_no_subscribers(self):
self.lda.update_group_table(mk_multicast_group_mod(
group_id=2,
buckets=[] # No subscribers
))
self.lda.update_flow_table(mk_simple_flow_mod(
priority=1000,
match_fields=[
in_port(0),
eth_type(0x800),
vlan_vid(4096 + 140),
ipv4_dst(0xe60a0a0a)
],
actions=[group(2)]
))
self.lda._flow_table_updated(self.flows)
self.assertEqual(len(self.device_flows['olt'].items), 0)
self.assertEqual(len(self.device_flows['onu1'].items), 3)
self.assertEqual(len(self.device_flows['onu2'].items), 3)
self.assertEqual(len(self.device_groups['olt'].items), 0)
self.assertEqual(len(self.device_groups['onu1'].items), 0)
self.assertEqual(len(self.device_groups['onu2'].items), 0)
self.assertFlowNotInFlows(mk_flow_stat(
priority=1000,
match_fields=[in_port(0), eth_type(0x800), vlan_vid(4096 + 140),
ipv4_dst(0xe60a0a0a)],
actions=[
pop_vlan(),
output(2)
]
), self.device_flows['olt'])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ COMPLEX TESTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_complex_flow_table_decomposition(self):
# Various controller-bound rules
for _in_port in (101, 201):
self.lda.update_flow_table(mk_simple_flow_mod(
priority=2000,
match_fields=[in_port(_in_port), eth_type(0x888e)],
actions=[
push_vlan(0x8100),
set_field(vlan_vid(4096 + 4000)),
output(ofp.OFPP_CONTROLLER)
]
))
self.lda.update_flow_table(mk_simple_flow_mod(
priority=1000,
match_fields=[in_port(_in_port), eth_type(0x800), ip_proto(2)],
actions=[output(ofp.OFPP_CONTROLLER)]
))
self.lda.update_flow_table(mk_simple_flow_mod(
priority=1000,
match_fields=[in_port(_in_port), eth_type(0x800), ip_proto(17),
udp_src(68), udp_dst(67)],
actions=[output(ofp.OFPP_CONTROLLER)]
))
# Multicast channels
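        # Each entry below is (group_id, multicast IPv4 destination, subscriber logical ports).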
mcast_setup = (
(1, 0xe4010101, ()),
(2, 0xe4010102, (101,)),
(3, 0xe4010103, (201,)),
(4, 0xe4010104, (101, 201)),
)
for group_id, mcast_addr, ports in mcast_setup:
self.lda.update_group_table(mk_multicast_group_mod(
group_id=group_id,
buckets=[
ofp.ofp_bucket(actions=[
pop_vlan(),
output(port)
]) for port in ports
]))
self.lda.update_flow_table(mk_simple_flow_mod(
priority=1000,
match_fields=[
in_port(0),
eth_type(0x800),
vlan_vid(4096 + 140),
ipv4_dst(mcast_addr)
],
actions=[
group(group_id)
]
))
# Unicast channels for each subscriber
for port, c_vid in ((101, 101), (201, 201)):
# Downstream flow 1 for nni to pon
self.lda.update_flow_table(mk_simple_flow_mod(
priority=500,
match_fields=[
in_port(0),
vlan_vid(4096 + 1000)
],
actions=[pop_vlan()],
next_table_id=1
))
# Downstream flow 2
self.lda.update_flow_table(mk_simple_flow_mod(
priority=500,
match_fields=[in_port(0), vlan_vid(4096 + c_vid)],
actions=[set_field(vlan_vid(4096 + 0)), output(port)]
))
# upstream flow 1 for the 0-tagged case
self.lda.update_flow_table(mk_simple_flow_mod(
priority=500,
match_fields=[in_port(port), vlan_vid(4096 + 0)],
actions=[set_field(vlan_vid(4096 + c_vid))],
next_table_id=1
))
# ... and for the untagged case
self.lda.update_flow_table(mk_simple_flow_mod(
priority=500,
match_fields=[in_port(port), vlan_vid(0)],
actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + c_vid))],
next_table_id=1
))
# Upstream flow 2 for s-tag
self.lda.update_flow_table(mk_simple_flow_mod(
priority=500,
match_fields=[in_port(port), vlan_vid(4096 + c_vid)],
actions=[
push_vlan(0x8100),
set_field(vlan_vid(4096 + 1000)),
output(0)
]
))
self.assertEqual(len(self.flows.items), 19)
self.assertEqual(len(self.groups.items), 4)
# trigger flow table decomposition
self.lda._flow_table_updated(self.flows)
# now check device level flows
self.assertEqual(len(self.device_flows['olt'].items), 10)
self.assertEqual(len(self.device_flows['onu1'].items), 5)
self.assertEqual(len(self.device_flows['onu2'].items), 5)
self.assertEqual(len(self.device_groups['olt'].items), 0)
self.assertEqual(len(self.device_groups['onu1'].items), 0)
self.assertEqual(len(self.device_groups['onu2'].items), 0)
# Flows installed on the OLT
self.assertFlowsEqual(self.device_flows['olt'].items[0], mk_flow_stat(
priority=2000,
match_fields=[in_port(1), eth_type(0x888e)],
actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + 4000)),
output(2147483645)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[1], mk_flow_stat(
priority=1000,
match_fields=[in_port(1), eth_type(0x800), ip_proto(2)],
actions=[output(2147483645)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[2], mk_flow_stat(
priority=1000,
match_fields=[in_port(1), eth_type(0x800), ip_proto(17),
udp_src(68), udp_dst(67)],
actions=[output(2147483645)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[3], mk_flow_stat(
priority=2000,
match_fields=[in_port(2), eth_type(0x888e)],
actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + 4000)),
output(2147483645)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[4], mk_flow_stat(
priority=1000,
match_fields=[in_port(2), eth_type(0x800), ip_proto(2)],
actions=[output(2147483645)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[5], mk_flow_stat(
priority=1000,
match_fields=[in_port(2), eth_type(0x800), ip_proto(17),
udp_src(68), udp_dst(67)],
actions=[output(2147483645)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[6], mk_flow_stat(
priority=1000,
match_fields=[in_port(0), eth_type(0x800), vlan_vid(4096 + 140),
ipv4_dst(0xE4010102)],
actions=[pop_vlan(), output(1)]
))
self.assertFlowsEqual(self.device_flows['olt'].items[9], mk_flow_stat(
priority=500,
match_fields=[in_port(0), vlan_vid(4096 + 1000)],
actions=[pop_vlan(), output(2)]
))
# Flows installed on the ONU1
self.assertFlowsEqual(self.device_flows['onu1'].items[0], mk_flow_stat(
priority=500,
match_fields=[in_port(0), vlan_vid(4096 + 0)],
actions=[
set_field(vlan_vid(4096 + 101)), output(1)]
))
self.assertFlowsEqual(self.device_flows['onu1'].items[2], mk_flow_stat(
priority=500,
match_fields=[in_port(1), vlan_vid(4096 + 101)],
actions=[set_field(vlan_vid(4096 + 0)), output(0)]
))
self.assertFlowsEqual(self.device_flows['onu1'].items[1], mk_flow_stat(
priority=500,
match_fields=[in_port(0), vlan_vid(0)],
actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + 101)),
output(1)]
))
# Flows installed on the ONU2
self.assertFlowsEqual(self.device_flows['onu2'].items[0], mk_flow_stat(
priority=500,
match_fields=[in_port(0), vlan_vid(4096 + 0)],
actions=[
set_field(vlan_vid(4096 + 201)), output(1)]
))
self.assertFlowsEqual(self.device_flows['onu2'].items[2], mk_flow_stat(
priority=500,
match_fields=[in_port(1), vlan_vid(4096 + 201)],
actions=[set_field(vlan_vid(4096 + 0)), output(0)]
))
self.assertFlowsEqual(self.device_flows['onu2'].items[1], mk_flow_stat(
priority=500,
match_fields=[in_port(0), vlan_vid(0)],
actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + 201)),
output(1)]
))
if __name__ == '__main__':
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=False):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=True):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBFloat16(self):
bfloat16 = dtypes_lib.bfloat16.as_numpy_dtype
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(np.empty((2, 0, 5)).astype(bfloat16))
def testHalf(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float16))
self._testAll(np.empty((2, 0, 5)).astype(np.float16))
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported.
with self.test_session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const = constant_op.constant(variant_tensor)
const_value = const.op.get_attr("value")
# Ensure we stored the tensor proto properly.
self.assertProtoEquals(variant_tensor, const_value)
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
logging_const_op = logging_ops.Print(
const, [const],
message="Variant storing an int, decoded const value:").op
logging_const_op.run()
def testStringWithNulls(self):
with self.test_session():
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.test_session():
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.test_session():
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
"""Tests PyObject refs are managed correctly when executing eagerly."""
constant_op.constant([[1.]])
def testImplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
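    # constant_op pads with the last provided value when fewer elements are
    # supplied than the shape requires, so this should succeed rather than raise.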
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with ops.Graph().as_default():
with self.assertRaisesWithPredicateMatch(
ValueError,
lambda e: ("Too many elements provided. Needed at most 5, "
"but received 7" == str(e))):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeConstant(self):
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = constant_op.constant(large_array)
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeGraph(self):
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegexp(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.Tensor))
def testAsTensorForShapeInput(self):
with self.test_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], x.eval())
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
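      # Dimensions up to 2**31 - 1 still fit in int32; anything at or above 2**31
      # should force an int64 tensor (verified below).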
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31-1, 2, 3], x.eval())
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]),
dtype=dtypes_lib.int32)
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31-1, 2, 3], x.eval())
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]))
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], x.eval())
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], x.eval())
with self.assertRaisesRegexp(
ValueError, "a dimension is too large .2147483648."):
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int32)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
def testAsTensorForDimensionInput(self):
with self.test_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, x.eval())
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, x.eval())
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape(None)[1])
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.float32)
class IdentityOpTest(test.TestCase):
def testIdTensor(self):
with ops.Graph().as_default():
x = constant_op.constant(2.0, shape=[6], name="input")
id_op = array_ops.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }",
id_op.op.node_def)
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
with self.test_session():
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.test_session():
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.eval())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
with self.test_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.eval()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.eval()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, fully_defined_shape, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
      # np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
if fully_defined_shape:
d = constant_op.constant(
np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
else:
d = array_ops.placeholder(dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
if fully_defined_shape:
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
feed_dict = {}
if not fully_defined_shape:
feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype)
z_value = z_var.eval(feed_dict=feed_dict)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.string
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=False)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=False)
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.complex64,
dtypes_lib.complex128, dtypes_lib.bool
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=True)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=True)
def testZerosLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
with self.test_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
def testZerosLikeVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported AND we register a
# ZerosLike callback for GPU for Variant storing primitive types
# in variant_op_registry.cc.
with self.test_session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const_variant = constant_op.constant(variant_tensor)
zeros_like = array_ops.zeros_like(const_variant)
zeros_like_op = logging_ops.Print(
zeros_like, [const_variant, zeros_like],
message="Variant storing an int, input and output of zeros_like:").op
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
zeros_like_op.run()
class OnesTest(test.TestCase):
def _Ones(self, shape):
with self.test_session():
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.test_session():
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.eval())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testAutoPack(self):
with self.test_session():
h = array_ops.placeholder(dtypes_lib.int32, shape=[])
w = array_ops.placeholder(dtypes_lib.int32, shape=[])
z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))
def testDtype(self):
with self.test_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128,
dtypes_lib.int64, dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int8,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16, dtypes_lib.int32,
dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]:
numpy_dtype = dtype.as_numpy_dtype
with self.test_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(
np.ones(
(2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
def testOnesLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.test_session(use_gpu=False):
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
with self.test_session():
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(ValueError):
array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
def testGradient(self):
with self.test_session():
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)
class PlaceholderTest(test.TestCase):
def testDtype(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
def testShape(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
p_identity.eval()
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
def testUnknownShape(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
def testScalarShape(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[], name="p")
p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)
def testPartialShape(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
def testPartialShapeWhenNotFed(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
# Should trigger an operator error, not a shape error.
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
def testControlDependency(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.multiply(p, c)
val = np.array(2).astype(np.int)
self.assertEqual(10, d.eval(feed_dict={p: val}))
def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))
def testTensorStr(self):
a = array_ops.placeholder(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
def testOldGraph(self):
# Load graph generated from earlier version of TF where
# placeholder shape was not set.
#
# a = tf.placeholder(tf.float32)
# b = a + 1.0
#
# Older graph's default shape is 'shape {}', not 'shape {
# unknown_rank: true }'
graph = """
node {
name: "Placeholder"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
}
}
}
}
node {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
node {
name: "add"
op: "Add"
input: "Placeholder"
input: "add/y"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 21
}
"""
gdef = graph_pb2.GraphDef()
text_format.Merge(graph, gdef)
with self.test_session():
p, ret = importer.import_graph_def(
gdef, return_elements=["Placeholder:0", "add:0"])
# Feed in a vector of two elements. Since the producer version
# of 21, a shape of {} is interpreted as "any shape". If
# producer version were 22, then we'd get a shape mismatch
# error.
self.assertAllEqual([2.0, 3.0], ret.eval(feed_dict={p: [1.0, 2.0]}))
class PlaceholderWithDefaultTest(test.TestCase):
def testFullShape(self):
with self.test_session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], a.eval())
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
def testPartialShape(self):
with self.test_session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[2, 2], [2, 2]]})
def testNoShape(self):
with self.test_session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
def testGradient(self):
with self.test_session(force_gpu=test_util.is_gpu_available()):
x = array_ops.placeholder(dtypes_lib.float32, [5, 7])
y = array_ops.placeholder_with_default(x, None)
err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7])
self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to update a TensorFlow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib import graph_editor
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Quantizable operation types that are supported by the quantization rewrite.
_QUANTIZABLE_TYPES = {'Conv2D', 'MatMul', 'DepthwiseConv2dNative'}
# Activations that are supported by the quantization rewrite.
_ACTIVATION_TYPES = {'Relu', 'Relu6'}
def Quantize(graph,
is_training,
weight_bits=8,
activation_bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
scope=None):
"""Updates graph with quantization operations.
Currently we quantize the following tensors:
* Conv/MatMul: Quantize the weights if it matches.
* Activation: Quantize the output if it matches.
* Bypass/Post-activation Bypass: Quantize both input and output
if it matches.
Args:
graph: Graph to modify.
is_training: Whether quantizing training graph or eval graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: When quantization fails.
"""
if scope and not scope.endswith('/'):
scope += '/'
input_to_ops_map = input_to_ops.InputToOps(graph)
for layer_match in _FindLayersToQuantize(graph):
# Quantize the weights.
context = _GetContextFromOp(layer_match.layer_op)
# If `scope` is given, only quantize it if the consumer of weights
# (the layer op) is in the right scope.
_InsertQuantOp(
context,
'weights_quant',
layer_match.weight_tensor.op, [layer_match.layer_op],
is_training,
moving_avg=False,
ema_decay=ema_decay,
quant_delay=quant_delay,
narrow_range=True,
vars_collection=vars_collection,
bits=weight_bits,
consumer_scope=scope)
# Quantize the activations.
consumer_ops = input_to_ops_map.ConsumerOperations(
layer_match.activation_op)
add_context = context
if layer_match.bypass_op:
add_context = re.search(r'^(.*)/([^/]+)', context).group(1)
# If `scope` is given, only quantize it if the producer of weights
# (usually it's the layer op) is in the right scope.
_InsertQuantOp(
add_context,
'act_quant',
layer_match.activation_op,
consumer_ops,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
init_min=0.0,
producer_scope=scope)
# Quantize the inputs and output to the bypass (if it exists). The input to
# the bypass is the bias add, and the output is the activation.
if layer_match.bypass_op is not None:
# If `scope` is given, only quantize it if the both the producer and the
# consumer are in the right scope.
_InsertQuantOp(
context,
'conv_quant',
layer_match.bias_add_op, [layer_match.bypass_op],
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope,
consumer_scope=scope)
      # Make sure the op following this isn't an activation; if it is, we
      # shouldn't quantize the bypass, since the activation will be fused
      # into the Add at inference time.
consumers = input_to_ops_map.ConsumerOperations(layer_match.bypass_op)
if any([consumer.type in _ACTIVATION_TYPES for consumer in consumers]):
        logging.info('Skipping %s, because it is followed by an activation.',
                     layer_match.bypass_op.name)
else:
_InsertQuantOp(
add_context,
'add_quant',
layer_match.bypass_op,
input_to_ops_map.ConsumerOperations(layer_match.bypass_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope,
consumer_scope=scope)
# Quantize bypass ops that occur after the activation.
if layer_match.post_activation_bypass_op is not None:
post_activation_bypass_context = re.search(
r'^(.*)/([^/]+)', layer_match.post_activation_bypass_op.name).group(1)
# If `scope` is given, only quantize it if the producer is in the right
# scope.
      # Make sure the op following this isn't an activation; if it is, we
      # shouldn't quantize it, since the activation will be fused into the
      # Add at inference time.
consumers = input_to_ops_map.ConsumerOperations(
layer_match.post_activation_bypass_op)
if any([consumer.type in _ACTIVATION_TYPES for consumer in consumers]):
        logging.info('Skipping %s, because it is followed by an activation.',
                     layer_match.post_activation_bypass_op.name)
else:
_InsertQuantOp(
post_activation_bypass_context,
'post_activation_bypass_quant',
layer_match.post_activation_bypass_op,
consumers,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope)
def _FindLayersToQuantize(graph):
"""Matches layers in graph to quantize.
The following patterns get matched. Nodes surrounded by [] will be
optionally matched:
weight|folded_weight
/
conv|fc
|
[post_conv_correction]
|
biasadd|folded_bias
|
[bypass]
|
activation
|
[post_activation_bypass]
Match replacements:
If weight|folded_weight is found, FakeQuant is added afterwards.
If bypass is found, FakeQuant is added before and after.
If activation is found, FakeQuant is added afterwards.
If post_activation_bypass is found, FakeQuant is added afterwards.
Args:
graph: Graph to perform match on.
Returns:
list of _LayerMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
weight_var_pattern = graph_matcher.OpTypePattern('Variable|VariableV2')
weight_partition_identity_pattern = graph_matcher.OpTypePattern(
'Identity', inputs=[weight_var_pattern])
weight_partition_concat_pattern = graph_matcher.OpTypePattern(
'ConcatV2', inputs=[weight_partition_identity_pattern, '*', '*'])
weight_identity_pattern = graph_matcher.OpTypePattern(
'Identity',
inputs=[
graph_matcher.OneofPattern([
weight_partition_identity_pattern,
weight_partition_concat_pattern,
weight_var_pattern,
])
])
weight_resource_var_pattern = graph_matcher.OpTypePattern('ReadVariableOp')
folded_weight_pattern = graph_matcher.OpTypePattern('Mul')
  # The weight input to the layer operation can come either from the Variable
  # or from the folded weight (Mul).
layer_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
input_pattern,
graph_matcher.OneofPattern([
weight_identity_pattern, weight_resource_var_pattern,
folded_weight_pattern
])
],
ordered_inputs=False)
folded_bias_mul_pattern = graph_matcher.OpTypePattern(
'Mul',
inputs=[graph_matcher.OpTypePattern('*'), layer_pattern],
ordered_inputs=False)
post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[folded_bias_mul_pattern,
graph_matcher.OpTypePattern('*')],
ordered_inputs=False)
folded_bias_add_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
post_layer_op_correction_pattern,
graph_matcher.OpTypePattern('*')
],
ordered_inputs=False)
bias_add_pattern = graph_matcher.OpTypePattern(
'Add|BiasAdd', inputs=[layer_pattern, '*'], ordered_inputs=False)
# The bias can come from the bias add or the folded bias add.
bypass_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern]), '*'
],
ordered_inputs=False)
  # The input to the activation can come from the bias add, the folded bias
  # add, or the bypass.
# TODO(suharshs): We should ideally skip Identity operations instead of
# treating them as an activation.
activation_pattern = graph_matcher.OpTypePattern(
'|'.join(_ACTIVATION_TYPES) + '|Identity',
inputs=[
graph_matcher.OneofPattern([
bias_add_pattern,
folded_bias_add_pattern,
bypass_pattern,
])
])
post_activation_bypass_pattern = graph_matcher.OpTypePattern(
'Add', inputs=['*', activation_pattern], ordered_inputs=False)
# The order of the following matching blocks is very important. Since matches
# aren't guaranteed to be disjoint, we structure matches from largest to
# smallest to guarantee that the largest match always wins. Additionally, we
# ensure that we don't match layers multiple times.
layer_matches = []
# We use matched_layer_set to ensure that layers aren't matched multiple
# times.
matched_layer_set = set()
# First, we match layers that have a post activation bypass. We do this first
# to ensure we don't match only the first part of this layer, missing the
# post activation bypass node.
post_activation_bypass_layer_matcher = graph_matcher.GraphMatcher(
post_activation_bypass_pattern)
for match_result in post_activation_bypass_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern)
post_activation_bypass_op = match_result.get_op(
post_activation_bypass_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op))
# Now, we match the basic layer ending at an activation. We may get duplicate
# matches from above, but we don't add them to layer_matches.
layer_matcher = graph_matcher.GraphMatcher(activation_pattern)
for match_result in layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op, None,
bias_add_op))
# Match the final layer, where there may not be an activation and instead
# the output of the final BiasAdd must be quantized. So we treat the BiasAdd
  # as the 'activation_op' in the _LayerMatch, to ensure that its output is
# quantized.
final_layer_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern([bias_add_pattern, folded_bias_add_pattern]))
for match_result in final_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(bias_add_pattern)
if activation_op is None:
activation_op = match_result.get_op(folded_bias_add_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))
return layer_matches
def _HasPostActivationBypass(activation_op):
for activation_tensor in activation_op.outputs:
for output_op in activation_tensor.consumers():
if output_op.type == 'Add':
return True
return False
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
def __init__(self, layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op):
self._layer_op = layer_op
self._weight_tensor = weight_tensor
self._activation_op = activation_op
self._bypass_op = bypass_op
self._post_activation_bypass_op = post_activation_bypass_op
self._bias_add_op = bias_add_op
@property
def layer_op(self):
return self._layer_op
@property
def weight_tensor(self):
return self._weight_tensor
@property
def activation_op(self):
return self._activation_op
@property
def bypass_op(self):
return self._bypass_op
@property
def post_activation_bypass_op(self):
return self._post_activation_bypass_op
@property
def bias_add_op(self):
return self._bias_add_op
def _InsertQuantOp(context,
name,
producer,
consumers,
is_training,
moving_avg=True,
init_min=-6.0,
init_max=6.0,
bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
narrow_range=False,
producer_scope=None,
consumer_scope=None):
"""Inserts a quant op between a producer op and (multiple) consumer ops.
Args:
context: Context where producer and consumer operations are nested.
name: Name for the new quantization op within the context.
producer: Producer operation of the pairs where quantization will be
inserted.
consumers: Consumer operations of the pairs.
is_training: Whether quantizing training graph or eval graph.
moving_avg: Specifies whether to use exponential moving average or just
the last value seen.
init_min: Starting minimum value for the new quantization op.
init_max: Starting maximum value for the new quantization op.
bits: Number of bits to use for quantization, must be between 2 and 8.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
narrow_range: Whether to use the narrow quantization range
[1; 2^bits - 1] or wide range [0; 2^bits - 1].
producer_scope: The restriction of producer scope. If not None, the new op
will be inserted only when the producer is in this scope.
    consumer_scope: The restriction of consumer scope. If not None, the new op
      will be inserted only when all the consumers are in this scope.
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
"""
if producer_scope and not producer.name.startswith(producer_scope):
logging.info(
'_InsertQuantOp ignores context="%s" name="%s" '
'because producer "%s" is not in scope "%s"',
context, name, producer.name, producer_scope)
return
if consumer_scope:
consumers_in_scope = []
for consumer in consumers:
if consumer.name.startswith(consumer_scope):
consumers_in_scope.append(consumer)
else:
logging.info(
'_InsertQuantOp context="%s" name="%s" ignores '
'consumer "%s" because it is not in scope "%s"',
context, name, consumer.name, consumer_scope)
return
consumers = consumers_in_scope
name_prefix = _AddContextToName(context, name)
# This is needed on TPU where name_scope == 'TPUReplicate/loop', and
# name_prefix starts with 'TPUReplicate/loop/'; without dropping it
# variables are created as TPUReplicate/loop/TPUReplicate/loop/..., which
# breaks things later.
name_scope = ops.get_name_scope()
if name_scope:
name_prefix = common.DropStringPrefix(name_prefix, name_scope + '/')
inputs = producer.outputs[0]
# Prevent ops from being quantized multiple times. Bypass ops can sometimes
# overlap between multiple matches, so we need to ensure that we don't
# add duplicate FakeQuant operations.
fake_quant_ops = set([
'FakeQuantWithMinMaxVars',
'FakeQuantWithMinMaxArgs'
])
if fake_quant_ops.intersection(set([c.type for c in inputs.consumers()])):
return
if moving_avg:
quant = (
quant_ops.MovingAvgQuantize(
inputs,
init_min=init_min,
init_max=init_max,
ema_decay=ema_decay,
is_training=is_training,
num_bits=bits,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
else:
quant = (
quant_ops.LastValueQuantize(
inputs,
init_min=init_min,
init_max=init_max,
is_training=is_training,
num_bits=bits,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
if consumers:
tensors_modified_count = graph_editor.reroute_ts(
[quant], [inputs], can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
def _GetContextFromOp(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _AddContextToName(context, name):
"""Adds the context to the name if it exists."""
if not context:
return name
return context + '/' + name
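# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It builds a
# tiny conv + bias + relu graph with stock TF 1.x ops and applies the
# training rewrite; all shapes, names and hyperparameters below are
# assumptions chosen for demonstration only.
def _ExampleQuantizeTrainingGraph():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import nn_ops
  from tensorflow.python.ops import variable_scope

  g = ops.Graph()
  with g.as_default():
    images = array_ops.placeholder(dtypes.float32, shape=[1, 8, 8, 3])
    with variable_scope.variable_scope('example'):
      weights = variable_scope.get_variable('weights', shape=[3, 3, 3, 16])
      biases = variable_scope.get_variable('biases', shape=[16])
    conv = nn_ops.conv2d(images, weights, strides=[1, 1, 1, 1],
                         padding='SAME')
    nn_ops.relu(nn_ops.bias_add(conv, biases))
    # Rewrites the graph in place: FakeQuant ops are inserted after the
    # weights read and after the Relu, gated off for the first 2000 steps.
    Quantize(g, is_training=True, weight_bits=8, activation_bits=8,
             quant_delay=2000)
  return g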
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
import six
import webob
from jacket.api.compute.openstack import api_version_request
from jacket.api.compute.openstack.compute.legacy_v2.contrib import admin_actions as \
migrate_server_v2
from jacket.api.compute.openstack.compute import migrate_server as \
migrate_server_v21
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack.compute import admin_only_action_common
from jacket.tests.compute.unit.api.openstack import fakes
class MigrateServerTestsV21(admin_only_action_common.CommonTests):
migrate_server = migrate_server_v21
controller_name = 'MigrateServerController'
validation_error = exception.ValidationError
_api_version = '2.1'
disk_over_commit = False
def setUp(self):
super(MigrateServerTestsV21, self).setUp()
self.controller = getattr(self.migrate_server, self.controller_name)()
self.compute_api = self.controller.compute_api
def _fake_controller(*args, **kwargs):
return self.controller
self.stubs.Set(self.migrate_server, self.controller_name,
_fake_controller)
self.mox.StubOutWithMock(self.compute_api, 'get')
def _get_migration_body(self, **kwargs):
return {'os-migrateLive': self._get_params(**kwargs)}
def _get_params(self, **kwargs):
return {'host': kwargs.get('host'),
'block_migration': kwargs.get('block_migration') or False,
'disk_over_commit': self.disk_over_commit}
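    # For example, _get_migration_body(host='hostname') produces the request
    # body {'os-migrateLive': {'host': 'hostname', 'block_migration': False,
    # 'disk_over_commit': False}}, which the tests below feed to the
    # controller methods.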
def test_migrate(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
'hostname'), {})}
self._test_actions(['_migrate', '_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_none_hostname(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': self._get_migration_body(host=None)}
args_map = {'_migrate_live': ((False, self.disk_over_commit, None),
{})}
self._test_actions(['_migrate', '_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_with_non_existed_instance(self):
body_map = self._get_migration_body(host='hostname')
self._test_actions_with_non_existed_instance(
['_migrate', '_migrate_live'], body_map=body_map)
def test_migrate_raise_conflict_on_invalid_state(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = self._get_migration_body(host='hostname')
args_map = {'_migrate_live': ((False, self.disk_over_commit,
'hostname'), {})}
exception_arg = {'_migrate': 'migrate',
'_migrate_live': 'os-migrateLive'}
self._test_actions_raise_conflict_on_invalid_state(
['_migrate', '_migrate_live'], body_map=body_map,
args_map=args_map, method_translations=method_translations,
exception_args=exception_arg)
def test_actions_with_locked_instance(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live':
self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
'hostname'), {})}
self._test_actions_with_locked_instance(
['_migrate', '_migrate_live'], body_map=body_map,
args_map=args_map, method_translations=method_translations)
def _test_migrate_exception(self, exc_info, expected_result):
self.mox.StubOutWithMock(self.compute_api, 'resize')
instance = self._stub_instance_get()
self.compute_api.resize(self.context, instance).AndRaise(exc_info)
self.mox.ReplayAll()
self.assertRaises(expected_result,
self.controller._migrate,
self.req, instance['uuid'], {'migrate': None})
def test_migrate_too_many_instances(self):
exc_info = exception.TooManyInstances(overs='', req='', used=0,
allowed=0)
self._test_migrate_exception(exc_info, webob.exc.HTTPForbidden)
def _test_migrate_live_succeeded(self, param):
self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
instance = self._stub_instance_get()
self.compute_api.live_migrate(self.context, instance, False,
self.disk_over_commit, 'hostname')
self.mox.ReplayAll()
res = self.controller._migrate_live(self.req, instance.uuid,
body={'os-migrateLive': param})
        # NOTE: on v2.1 the HTTP status code is set as the wsgi_code of the
        # API method instead of as status_int on the response object.
if self._api_version == '2.1':
status_int = self.controller._migrate_live.wsgi_code
else:
status_int = res.status_int
self.assertEqual(202, status_int)
def test_migrate_live_enabled(self):
param = self._get_params(host='hostname')
self._test_migrate_live_succeeded(param)
def test_migrate_live_enabled_with_string_param(self):
param = {'host': 'hostname',
'block_migration': "False",
'disk_over_commit': "False"}
self._test_migrate_live_succeeded(param)
def test_migrate_live_without_host(self):
body = self._get_migration_body()
del body['os-migrateLive']['host']
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_without_block_migration(self):
body = self._get_migration_body()
del body['os-migrateLive']['block_migration']
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_without_disk_over_commit(self):
body = {'os-migrateLive':
{'host': 'hostname',
'block_migration': False}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_with_invalid_block_migration(self):
body = self._get_migration_body(block_migration='foo')
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_with_invalid_disk_over_commit(self):
body = {'os-migrateLive':
{'host': 'hostname',
'block_migration': False,
'disk_over_commit': "foo"}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_missing_dict_param(self):
body = self._get_migration_body(host='hostname')
del body['os-migrateLive']['host']
body['os-migrateLive']['dummy'] = 'hostname'
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_live_failed_with_exception(
self, fake_exc,
uuid=None,
expected_exc=webob.exc.HTTPBadRequest,
check_response=True):
self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
instance = self._stub_instance_get(uuid=uuid)
self.compute_api.live_migrate(self.context, instance, False,
self.disk_over_commit,
'hostname').AndRaise(fake_exc)
self.mox.ReplayAll()
body = self._get_migration_body(host='hostname')
ex = self.assertRaises(expected_exc,
self.controller._migrate_live,
self.req, instance.uuid, body=body)
if check_response:
self.assertIn(six.text_type(fake_exc), ex.explanation)
def test_migrate_live_compute_service_unavailable(self):
self._test_migrate_live_failed_with_exception(
exception.ComputeServiceUnavailable(host='host'))
def test_migrate_live_invalid_hypervisor_type(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidHypervisorType())
def test_migrate_live_invalid_cpu_info(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidCPUInfo(reason=""))
def test_migrate_live_unable_to_migrate_to_self(self):
uuid = uuidutils.generate_uuid()
self._test_migrate_live_failed_with_exception(
exception.UnableToMigrateToSelf(instance_id=uuid,
host='host'),
uuid=uuid)
def test_migrate_live_destination_hypervisor_too_old(self):
self._test_migrate_live_failed_with_exception(
exception.DestinationHypervisorTooOld())
def test_migrate_live_no_valid_host(self):
self._test_migrate_live_failed_with_exception(
exception.NoValidHost(reason=''))
def test_migrate_live_invalid_local_storage(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidLocalStorage(path='', reason=''))
def test_migrate_live_invalid_shared_storage(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidSharedStorage(path='', reason=''))
def test_migrate_live_hypervisor_unavailable(self):
self._test_migrate_live_failed_with_exception(
exception.HypervisorUnavailable(host=""))
def test_migrate_live_instance_not_active(self):
self._test_migrate_live_failed_with_exception(
exception.InstanceInvalidState(
instance_uuid='', state='', attr='', method=''),
expected_exc=webob.exc.HTTPConflict,
check_response=False)
def test_migrate_live_pre_check_error(self):
self._test_migrate_live_failed_with_exception(
exception.MigrationPreCheckError(reason=''))
def test_migrate_live_migration_with_old_nova_not_safe(self):
self._test_migrate_live_failed_with_exception(
exception.LiveMigrationWithOldNovaNotSafe(server=''))
def test_migrate_live_migration_with_unexpected_error(self):
self._test_migrate_live_failed_with_exception(
exception.MigrationError(reason=''),
expected_exc=webob.exc.HTTPInternalServerError,
check_response=False)
class MigrateServerTestsV2(MigrateServerTestsV21):
migrate_server = migrate_server_v2
controller_name = 'AdminActionsController'
validation_error = webob.exc.HTTPBadRequest
_api_version = '2'
class MigrateServerTestsV225(MigrateServerTestsV21):
# We don't have disk_over_commit in v2.25
disk_over_commit = None
def setUp(self):
super(MigrateServerTestsV225, self).setUp()
self.req.api_version_request = api_version_request.APIVersionRequest(
'2.25')
def _get_params(self, **kwargs):
return {'host': kwargs.get('host'),
'block_migration': kwargs.get('block_migration') or False}
def test_migrate_live_enabled_with_string_param(self):
param = {'host': 'hostname',
'block_migration': "False"}
self._test_migrate_live_succeeded(param)
def test_migrate_live_without_disk_over_commit(self):
pass
def test_migrate_live_with_invalid_disk_over_commit(self):
pass
def test_live_migrate_block_migration_auto(self):
method_translations = {'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': {'os-migrateLive': {'host': 'hostname',
'block_migration': 'auto'}}}
args_map = {'_migrate_live': ((None, None, 'hostname'), {})}
self._test_actions(['_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_live_with_disk_over_commit_raise(self):
body = {'os-migrateLive':
{'host': 'hostname',
'block_migration': 'auto',
'disk_over_commit': False}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_migration_with_old_nova_not_supported(self):
self._test_migrate_live_failed_with_exception(
exception.LiveMigrationWithOldNovaNotSupported())
class MigrateServerPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(MigrateServerPolicyEnforcementV21, self).setUp()
self.controller = migrate_server_v21.MigrateServerController()
self.req = fakes.HTTPRequest.blank('')
def test_migrate_policy_failed(self):
rule_name = "os_compute_api:os-migrate-server:migrate"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._migrate, self.req,
fakes.FAKE_UUID,
body={'migrate': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_migrate_live_policy_failed(self):
rule_name = "os_compute_api:os-migrate-server:migrate_live"
self.policy.set_rules({rule_name: "project:non_fake"})
body_args = {'os-migrateLive': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._migrate_live, self.req,
fakes.FAKE_UUID,
body=body_args)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cell phone carrier Emoji symbols data loading and access."""
__author__ = "Markus Scherer"
import os.path
import xml.dom.minidom
import row_cell
class CarrierData(object):
"""One carrier's Emoji symbols data.
Attributes:
all_uni: All Unicode code points, for all of this carrier's symbols.
"""
all_uni = frozenset()
# Each _ranges attribute is a list of range tuples for mapping between
# linear ranges of Unicode code points and corresponding linear, same-length
# ranges of target values.
# Shift-JIS or JIS target values only count valid codes according to the
# encoding scheme.
# See _RangeFromUnicode(), _NumberFromUnicode(), etc.
_uni_to_number_ranges = None
_uni_to_old_number_ranges = None
_uni_to_shift_jis_ranges = None
_uni_to_jis_ranges = None
# Map from Unicode code point hex-digit strings to <e> DOM element nodes
# with symbol data.
_uni_to_elements = {}
def _AllUnicodesFromRanges(self, ranges):
"""Build the all_uni set from a list of range tuples."""
all_uni = set()
for one_range in ranges:
for uni in range(one_range[0], one_range[1] + 1):
all_uni.add("%04X" % uni)
self.all_uni = frozenset(all_uni)
def _CheckRanges(self):
"""Verify that in each range tuple the source and target ranges
have the same length."""
    if self._uni_to_number_ranges:
      for rng in self._uni_to_number_ranges:
        assert (rng[1] - rng[0]) == (rng[3] - rng[2])
    if self._uni_to_old_number_ranges:
      for rng in self._uni_to_old_number_ranges:
        assert (rng[1] - rng[0]) == (rng[3] - rng[2])
    if self._uni_to_shift_jis_ranges:
      for rng in self._uni_to_shift_jis_ranges:
        # Shift the Shift-JIS codes down to JIS X 0208 and compute the
        # linear differences.
        shift_jis_start = row_cell.FromShiftJis((rng[2] >> 8) - 0x10,
                                                rng[2] & 0xff)
        shift_jis_end = row_cell.FromShiftJis((rng[3] >> 8) - 0x10,
                                              rng[3] & 0xff)
        assert (rng[1] - rng[0]) == (shift_jis_end - shift_jis_start)
    if self._uni_to_jis_ranges:
      for rng in self._uni_to_jis_ranges:
        jis_start = row_cell.From2022((rng[2] >> 8), rng[2] & 0xff)
        jis_end = row_cell.From2022((rng[3] >> 8), rng[3] & 0xff)
        assert (rng[1] - rng[0]) == (jis_end - jis_start)
def _ReadXML(self, filename):
self.__doc = xml.dom.minidom.parse(filename)
self.__root = self.__doc.documentElement
for element in self.__root.getElementsByTagName("e"):
self._uni_to_elements[element.getAttribute("unicode")] = element
def SymbolFromUnicode(self, uni):
"""Get carrier data for one Emoji symbol.
Args:
uni: Carrier Unicode PUA code point, as a hex digit string.
Returns:
The Symbol instance corresponding to uni.
"""
symbol = Symbol()
symbol.uni = uni
symbol._element = self._uni_to_elements.get(uni)
symbol._carrier_data = self
if self._uni_to_number_ranges:
symbol.number = _NumberFromUnicode(self._uni_to_number_ranges, uni)
elif symbol._element:
number = symbol._element.getAttribute("number")
if number: symbol.number = int(number)
if self._uni_to_old_number_ranges:
symbol.old_number = _NumberFromUnicode(self._uni_to_old_number_ranges,
uni)
elif symbol._element:
old_number = symbol._element.getAttribute("old_number")
if old_number: symbol.old_number = int(old_number)
if self._uni_to_shift_jis_ranges:
symbol.shift_jis = (
"%04X" % _ShiftJisFromUnicode(self._uni_to_shift_jis_ranges, uni))
elif symbol._element:
shift_jis = symbol._element.getAttribute("shift_jis")
if shift_jis: symbol.shift_jis = shift_jis
if self._uni_to_jis_ranges:
symbol.jis = "%04X" % _JisFromUnicode(self._uni_to_jis_ranges, uni)
elif symbol._element:
jis = symbol._element.getAttribute("jis")
if jis: symbol.jis = jis
if symbol._element:
new_number = symbol._element.getAttribute("new_number")
if new_number: symbol.new_number = int(new_number)
return symbol
def _ImageHTML(self, uni, number):
"""Get HTML for the symbol image, or an empty string.
Called only from Symbol.ImageHTML()."""
return ""
def GetShiftJISLeadBytes(self):
"""Returns a frozenset of Shift-JIS lead bytes for Emoji symbols."""
lead_bytes = set()
if self._uni_to_shift_jis_ranges:
for sj_range in self._uni_to_shift_jis_ranges:
lead_bytes |= set(range(sj_range[2] >> 8, (sj_range[3] >> 8) + 1))
else:
for element in self._uni_to_elements.itervalues():
shift_jis = element.getAttribute("shift_jis")
if shift_jis: lead_bytes.add(int(shift_jis[0:2], 16))
return frozenset(lead_bytes)
def GetJISLeadBytesAsShiftJIS(self):
"""Returns a frozenset of JIS lead bytes in Shift-JIS format."""
lead_bytes = set()
if self._uni_to_jis_ranges:
for jis_range in self._uni_to_jis_ranges:
sjis_start = row_cell.From2022Integer(jis_range[2]).ToShiftJis()
sjis_end = row_cell.From2022Integer(jis_range[3]).ToShiftJis()
lead_bytes |= set(range(sjis_start[0], sjis_end[0] + 1))
else:
for element in self._uni_to_elements.itervalues():
jis = element.getAttribute("jis")
if jis: lead_bytes.add(row_cell.From2022String(jis).ToShiftJis()[0])
return frozenset(lead_bytes)
def _RangeFromUnicode(ranges, uni):
"""Select from a list the range containing the Unicode code point.
Args:
ranges: A list of ranges. Each range pair is a 4-tuple of
(unicode_start, unicode_end, target_start, target_end) integers.
Each range tuple represents a linear mapping between a range of Unicode
code points and a range of numbers/Shift-JIS codes/JIS codes.
In each tuple, the Unicode and target ranges must have the same length.
The _end values are inclusive range boundaries.
uni: A Unicode code point integer.
  Returns:
    The range tuple where unicode_start <= uni <= unicode_end, or None if
    no range contains uni.
  """
  for rng in ranges:
    if rng[0] <= uni <= rng[1]: return rng
  return None
def _NumberFromUnicode(ranges, uni):
"""Map a Unicode code point to a number.
Args:
ranges: A list of ranges. See _RangeFromUnicode().
uni: A Unicode code point (a hex digit string).
Returns:
The number integer corresponding to the
Unicode code point, according to the ranges;
or None if none of the ranges contains the code point.
"""
  uni = int(uni, 16)
  rng = _RangeFromUnicode(ranges, uni)
  if rng is None: return None
  return rng[2] + (uni - rng[0])
def _ShiftJisFromUnicode(ranges, uni):
"""Map a Unicode code point to a Shift-JIS code.
In a range of Shift-JIS codes, only valid codes according to the encoding
scheme are counted. For example, after F27E follows F280 because 7F is not
a valid trail byte.
Args:
ranges: A list of ranges. See _RangeFromUnicode().
uni: A Unicode code point (a hex digit string).
Returns:
The Shift-JIS code (integer) corresponding to the
Unicode code point, according to the ranges;
or None if none of the ranges contains the code point.
"""
  uni = int(uni, 16)
  rng = _RangeFromUnicode(ranges, uni)
  if rng is None: return None
  offset = uni - rng[0]
  # Shift the Shift-JIS codes down to JIS X 0208 and back up
  # so that we get standard row-cell byte values (1..94) and can use RowCell.
  rc = row_cell.FromShiftJis((rng[2] >> 8) - 0x10, rng[2] & 0xff) + offset
  (b1, b2) = rc.ToShiftJis()
  return ((b1 + 0x10) << 8) | b2
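# Worked example for the code-skipping arithmetic above: with the DoCoMo
# range (0xE63E, 0xE757, 0xF89F, 0xF9FC) defined below, U+E63F is offset 1
# from the range start, and stepping one row-cell position from F89F yields
# Shift-JIS F8A0; the RowCell stepping never lands on an invalid trail byte
# such as 7F.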
def _JisFromUnicode(ranges, uni):
"""Map a Unicode code point to a JIS X 0208 (ISO-2022-JP) code.
In a range of JIS codes, only valid codes according to the encoding
scheme are counted. For example, after 757E follows 7621.
Args:
ranges: A list of ranges. See _RangeFromUnicode().
uni: A Unicode code point (a hex digit string).
Returns:
The JIS code (integer) corresponding to the
Unicode code point, according to the ranges;
or None if none of the ranges contains the code point.
"""
  uni = int(uni, 16)
  rng = _RangeFromUnicode(ranges, uni)
  if rng is None: return None
  offset = uni - rng[0]
  rc = row_cell.From2022(rng[2] >> 8, rng[2] & 0xff) + offset
  (b1, b2) = rc.To2022()
  return (b1 << 8) | b2
class Symbol(object):
"""Carrier data for one Emoji symbol."""
__slots__ = ("uni", "number", "old_number", "new_number",
"shift_jis", "jis", "_element", "_carrier_data")
def __init__(self):
"""Carrier Emoji symbol data.
    Constructed by CarrierData.SymbolFromUnicode(). Do not instantiate this
    class yourself.
Attributes:
uni: Unicode PUA code point, 4..6-hex-digit string
number: Carrier-specific Emoji symbol number
old_number: Carrier-specific Emoji symbol number (old number system)
new_number: Carrier-specific Emoji symbol number (new number system)
shift_jis: Shift-JIS code, 4-hex-digit string
jis: JIS (ISO-2022-JP) code, 4-hex-digit string
"""
self.uni = None
self.number = None
self.old_number = None
self.new_number = None
self.shift_jis = None
self.jis = None
self._element = None # <e> XML element
def GetEnglishName(self):
"""Get the carrier's English name of this Emoji symbol."""
if self._element:
return self._element.getAttribute("name_en")
else:
return ""
def GetJapaneseName(self):
"""Get the carrier's Japanese name of this Emoji symbol."""
if self._element:
return self._element.getAttribute("name_ja")
else:
return ""
def ImageHTML(self):
"""Get HTML for the symbol image, or an empty string."""
return self._carrier_data._ImageHTML(self.uni, self.number)
class _DocomoData(CarrierData):
"""DoCoMo Emoji symbols data."""
_uni_to_number_ranges = [
(0xE63E, 0xE6A5, 1, 104),
(0xE6A6, 0xE6AB, 177, 182),
(0xE6AC, 0xE6AE, 167, 169),
(0xE6AF, 0xE6B0, 183, 184),
(0xE6B1, 0xE6B3, 170, 172),
(0xE6B4, 0xE6B6, 185, 187),
(0xE6B7, 0xE6BA, 173, 176),
(0xE6BB, 0xE6CD, 188, 206),
(0xE6CE, 0xE6EB, 105, 134),
(0xE6EC, 0xE70A, 136, 166),
(0xE70B, 0xE70B, 135, 135),
(0xE70C, 0xE757, 301, 376)]
_uni_to_shift_jis_ranges = [(0xE63E, 0xE757, 0xF89F, 0xF9FC)]
_uni_to_elements = {}
def __init__(self):
# TODO(mscherer): Add argument for root data folder path.
filename = os.path.join(os.path.dirname(__file__),
"..", "data", "docomo", "carrier_data.xml")
self._CheckRanges()
self._AllUnicodesFromRanges(self._uni_to_shift_jis_ranges)
self._ReadXML(filename)
def _ImageHTML(self, uni, number):
"""Get HTML for the symbol image, or an empty string.
Called only from Symbol.ImageHTML()."""
path = "http://www.nttdocomo.co.jp/service/imode/make/content/pictograph/"
if number < 300:
return ("<img src=%sbasic/images/%d.gif width=16 height=16>" %
(path, number))
else:
return ("<img src=%sextention/images/%d.gif width=16 height=16>" %
(path, number - 300))
class _KddiData(CarrierData):
"""KDDI Emoji symbols data."""
_uni_to_shift_jis_ranges = [
(0xE468, 0xE5B4, 0xF640, 0xF7D1),
(0xE5B5, 0xE5CC, 0xF7E5, 0xF7FC),
(0xE5CD, 0xE5DF, 0xF340, 0xF352),
(0xEA80, 0xEAFA, 0xF353, 0xF3CE),
(0xEAFB, 0xEB0D, 0xF7D2, 0xF7E4),
(0xEB0E, 0xEB8E, 0xF3CF, 0xF493)]
_uni_to_jis_ranges = [
(0xE468, 0xE5B4, 0x7521, 0x7853),
(0xE5B5, 0xE5DF, 0x7867, 0x7933),
(0xEA80, 0xEAFA, 0x7934, 0x7A50),
(0xEAFB, 0xEB0D, 0x7854, 0x7866),
(0xEB0E, 0xEB8E, 0x7A51, 0x7B73)]
_uni_to_elements = {}
def __init__(self):
# TODO(mscherer): Add argument for root data folder path.
filename = os.path.join(os.path.dirname(__file__),
"..", "data", "kddi", "carrier_data.xml")
self._CheckRanges()
self._AllUnicodesFromRanges(self._uni_to_shift_jis_ranges)
self._ReadXML(filename)
def _ImageHTML(self, uni, number):
"""Get HTML for the symbol image, or an empty string.
Called only from Symbol.ImageHTML()."""
return ("<img src=http://www001.upp.so-net.ne.jp/hdml/emoji/e/%d.gif>" %
number)
class _SoftbankData(CarrierData):
"""SoftBank Emoji symbols data."""
_uni_to_old_number_ranges = [
(0xE001, 0xE05A, 1, 90),
(0xE101, 0xE15A, 91, 180),
(0xE201, 0xE25A, 181, 270),
(0xE301, 0xE34D, 271, 347),
(0xE401, 0xE44C, 348, 423),
(0xE501, 0xE53E, 424, 485)]
_uni_to_shift_jis_ranges = [
(0xE001, 0xE05A, 0xF941, 0xF99B),
(0xE101, 0xE15A, 0xF741, 0xF79B),
(0xE201, 0xE25A, 0xF7A1, 0xF7FA),
(0xE301, 0xE34D, 0xF9A1, 0xF9ED),
(0xE401, 0xE44C, 0xFB41, 0xFB8D),
(0xE501, 0xE53E, 0xFBA1, 0xFBDE)]
_uni_to_elements = {}
__animated_img = frozenset([
"E101", "E102", "E103", "E104", "E105", "E106", "E107", "E108",
"E10D", "E10F",
"E113", "E115", "E117", "E11B", "E11D", "E12B", "E130",
"E201", "E206", "E219", "E254", "E255", "E256", "E257", "E258",
"E259", "E25A",
"E30C", "E310", "E311", "E312", "E313", "E317", "E31E", "E31F",
"E320", "E325", "E326", "E327", "E328", "E329", "E32E", "E335",
"E336", "E337", "E34B",
"E409", "E40D", "E412", "E417", "E41C", "E41E", "E41F", "E422",
"E423", "E428", "E429", "E42D", "E433", "E437", "E43E", "E440",
"E442", "E447", "E44B",
"E51F", "E538", "E539", "E53A", "E53B", "E53C", "E53D", "E53E"])
def __init__(self):
# TODO(mscherer): Add argument for root data folder path.
filename = os.path.join(os.path.dirname(__file__),
"..", "data", "softbank", "carrier_data.xml")
self._CheckRanges()
self._AllUnicodesFromRanges(self._uni_to_shift_jis_ranges)
self._ReadXML(filename)
def _ImageHTML(self, uni, number):
"""Get HTML for the symbol image, or an empty string.
Called only from Symbol.ImageHTML()."""
return ("<img src=http://creation.mb.softbank.jp/web/img/"
"%s01/%s_20%s.gif>" %
(uni[0:2], uni,
{False: "", True: "_ani"}[uni in self.__animated_img]))
class _GoogleData(CarrierData):
"""Google Emoji symbols data."""
pass
# CarrierData singletons
_DOCOMO_DATA = None
_KDDI_DATA = None
_SOFTBANK_DATA = None
_GOOGLE_DATA = None
def GetDocomoData():
global _DOCOMO_DATA
if not _DOCOMO_DATA: _DOCOMO_DATA = _DocomoData()
return _DOCOMO_DATA
def GetKddiData():
global _KDDI_DATA
if not _KDDI_DATA: _KDDI_DATA = _KddiData()
return _KDDI_DATA
def GetSoftbankData():
global _SOFTBANK_DATA
if not _SOFTBANK_DATA: _SOFTBANK_DATA = _SoftbankData()
return _SOFTBANK_DATA
def GetGoogleData():
global _GOOGLE_DATA
if not _GOOGLE_DATA: _GOOGLE_DATA = _GoogleData()
return _GOOGLE_DATA
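# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); it assumes the
# carrier_data.xml files referenced above are present on disk. U+E63E is the
# first code point of the DoCoMo ranges, so per the tables in _DocomoData its
# number is 1 and its Shift-JIS code is F89F.
def _ExamplePrintDocomoSymbol():
  docomo = GetDocomoData()
  symbol = docomo.SymbolFromUnicode("E63E")
  print("U+%s -> number %s, Shift-JIS %s, name %s" %
        (symbol.uni, symbol.number, symbol.shift_jis,
         symbol.GetEnglishName()))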
|
|
# pylama:ignore=E501
# TODO: Modify all calls to get a Well to use the `wells` method
from unittest import mock
import pytest
from opentrons.legacy_api.containers import load as containers_load
from opentrons.legacy_api.instruments import Pipette
from opentrons.legacy_api.containers.placeable import unpack_location
from opentrons.trackers import pose_tracker
from tests.opentrons.conftest import fuzzy_assert
from tests.opentrons import generate_plate
@pytest.fixture
def local_test_pipette(robot):
trash = containers_load(robot, 'point', '1')
tiprack1 = containers_load(robot, 'tiprack-10ul', '5')
tiprack2 = containers_load(robot, 'tiprack-10ul', '8')
plate = containers_load(robot, '96-flat', '4')
p200 = Pipette(
robot,
ul_per_mm=18.5,
trash_container=trash,
tip_racks=[tiprack1, tiprack2],
max_volume=200,
min_volume=10, # These are variable
mount='left',
channels=1,
name='other-pipette-for-transfer-tests'
)
p200.reset()
p200.calibrate_plunger(top=0, bottom=10, blow_out=12, drop_tip=13)
robot.home()
return trash, tiprack1, tiprack2, plate, p200
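# The fixture above lays out the deck as: trash in slot 1, 10 uL tipracks in
# slots 5 and 8, and a 96-well flat plate in slot 4, with a single-channel
# p200 (10-200 uL) on the left mount and plunger stops at top=0, bottom=10,
# blow_out=12, drop_tip=13.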
@pytest.mark.api1_only
def test_bad_volume_percentage(local_test_pipette):
_, _1, _2, _3, p200 = local_test_pipette
with pytest.raises(RuntimeError):
p200._volume_percentage(-1)
@pytest.mark.api1_only
def test_add_instrument(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
robot.reset()
Pipette(robot, ul_per_mm=18.5, max_volume=1000, mount='left')
with pytest.raises(RuntimeError):
Pipette(robot,
mount='left',
max_volume=100,
ul_per_mm=10)
@pytest.mark.api1_only
def test_aspirate_zero_volume(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
assert robot.commands() == []
p200.tip_attached = True
p200.aspirate(0)
assert robot.commands() == ['Aspirating 0.0 uL from ? at 92.5 uL/sec'] # noqa
@pytest.mark.api1_only
def test_get_plunger_position(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
assert p200._get_plunger_position('top') == 0
assert p200._get_plunger_position('bottom') == 10
assert p200._get_plunger_position('blow_out') == 12
assert p200._get_plunger_position('drop_tip') == 13
p200.plunger_positions['drop_tip'] = None
with pytest.raises(RuntimeError):
p200._get_plunger_position('drop_tip')
with pytest.raises(RuntimeError):
p200._get_plunger_position('roll_out')
@pytest.mark.api1_only
def test_deprecated_axis_call(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
import warnings
warnings.filterwarnings('error')
# Check that user warning occurs when axis is called
with pytest.raises(UserWarning):
Pipette(robot, axis='a')
# Check that the warning is still valid when max_volume is also used
with pytest.raises(UserWarning):
Pipette(robot, axis='a', max_volume=300)
warnings.filterwarnings('default')
@pytest.mark.api1_only
def test_get_instruments_by_name(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p1000 = Pipette(
robot,
ul_per_mm=18.5,
trash_container=trash,
tip_racks=[tiprack1],
max_volume=1000,
min_volume=10, # These are variable
mount='right',
name='p1000',
channels=1,
aspirate_speed=300,
dispense_speed=500
)
result = list(robot.get_instruments('p1000'))
assert result == [('right', p1000)]
@pytest.mark.api1_only
def test_placeables_reference(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.tip_attached = True
p200.aspirate(100, plate[0])
p200.dispense(100, plate[0])
p200.aspirate(100, plate[20])
p200.aspirate(100, plate[1])
expected = [
plate[0],
plate[20],
plate[1]
]
assert p200.placeables == expected
@pytest.mark.api1_only
def test_unpack_location(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
    # TODO: remove when the new labware system is promoted to production
    # (it should not include the `unpack_location` magic)
location = (plate[0], (1, 0, -1))
res = unpack_location(location)
assert res == (plate[0], (1, 0, -1))
res = unpack_location(plate[0])
assert res == (plate[0], plate[0].from_center(x=0, y=0, z=1))
@pytest.mark.api1_only
def test_aspirate_invalid_max_volume(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.tip_attached = True
with pytest.raises(RuntimeWarning):
p200.aspirate(500)
@pytest.mark.api1_only
def test_volume_percentage(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
with pytest.raises(RuntimeError):
p200._volume_percentage(-1)
with pytest.raises(RuntimeError):
p200._volume_percentage(300)
assert p200._volume_percentage(100) == 0.5
assert not robot.get_warnings()
p200._volume_percentage(p200.min_volume / 2)
assert len(robot.get_warnings()) == 1
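# Note: with max_volume=200 from the fixture, _volume_percentage(100)
# returning 0.5 is consistent with a linear volume / max_volume mapping,
# and volumes below min_volume (10) only produce a warning rather than an
# error.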
@pytest.mark.api1_only
def test_add_tip(local_test_pipette, robot):
"""
    This deals with z-accrual behavior during tip add/remove, when +/- get
    flipped in the pose-tracking logic.
"""
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
prior_position = pose_tracker.absolute(robot.poses, p200)
p200._add_tip(42)
p200._remove_tip(42)
new_position = pose_tracker.absolute(robot.poses, p200)
assert (new_position == prior_position).all()
@pytest.mark.api1_only
def test_set_speed(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.set_speed(aspirate=100)
assert p200.speeds['aspirate'] == 100
p200.set_speed(dispense=100)
assert p200.speeds['dispense'] == 100
@pytest.mark.api1_only
def test_distribute(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
    # The tests below set `tip_attached = True` instead of calling
    # pick_up_tip because they are based on an exact command list. Should
    # make this better.
p200.distribute(
30,
plate[0],
plate[1:9],
new_tip='always'
)
expected = [
['Distributing', '30', 'well A1', 'wells B1...A2'],
['Transferring'],
['Picking up tip'],
['Aspirating', '190', 'well A1'],
['Dispensing', '30', 'well B1'],
['Dispensing', '30', 'well C1'],
['Dispensing', '30', 'well D1'],
['Dispensing', '30', 'well E1'],
['Dispensing', '30', 'well F1'],
['Dispensing', '30', 'well G1'],
['Blow', 'well A1'],
['Drop'],
['Pick'],
['Aspirating', '70', 'well A1'],
['Dispensing', '30', 'well H1'],
['Dispensing', '30', 'well A2'],
['Blow', 'well A1'],
['Drop']
]
fuzzy_assert(robot.commands(), expected=expected)
robot.clear_commands()
p200.reset()
p200.tip_attached = True
p200.distribute(
30,
plate[0],
plate[1:9],
new_tip='never'
)
expected = [
['Distributing', '30', 'well A1', 'wells B1...A2'],
['Transferring'],
['Aspirating', '190', 'well A1'],
['Dispensing', '30', 'well B1'],
['Dispensing', '30', 'well C1'],
['Dispensing', '30', 'well D1'],
['Dispensing', '30', 'well E1'],
['Dispensing', '30', 'well F1'],
['Dispensing', '30', 'well G1'],
['Blow', 'well A1'],
['Aspirating', '70', 'well A1'],
['Dispensing', '30', 'well H1'],
['Dispensing', '30', 'well A2'],
['Blow', 'well A1']
]
fuzzy_assert(robot.commands(), expected=expected)
robot.clear_commands()
p200.reset()
p200.distribute(
30,
plate[0],
plate
)
total_dispenses = 0
for c in robot.commands():
if 'dispensing' in c.lower():
total_dispenses += 1
assert total_dispenses == 96
robot.clear_commands()
p200.reset()
p200.transfer(
30,
plate[0],
plate[1:9],
trash=False
)
expected = [
['Transferring', '30', 'well A1'],
['Pick'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well B1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well C1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well D1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well E1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well F1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well G1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well H1'],
['Aspirating', '30', 'well A1'],
['Dispensing', '30', 'well A2'],
['Return'],
['Drop']
]
fuzzy_assert(robot.commands(), expected=expected)
robot.clear_commands()
@pytest.mark.api1_only
def test_consolidate(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.consolidate(
30,
plate[0:8],
plate['A2'],
new_tip='always'
)
expected = [
['Consolidating', '30'],
['Transferring', '30'],
['Pick'],
['Aspirating', '30', 'Well A1'],
['Aspirating', '30', 'Well B1'],
['Aspirating', '30', 'Well C1'],
['Aspirating', '30', 'Well D1'],
['Aspirating', '30', 'Well E1'],
['Aspirating', '30', 'Well F1'],
['Dispensing', '180', 'Well A2'],
['Drop'],
['Pick'],
['Aspirating', '30', 'Well G1'],
['Aspirating', '30', 'Well H1'],
['Dispensing', '60', 'Well A2'],
['Drop']
]
fuzzy_assert(robot.commands(),
expected=expected)
robot.clear_commands()
p200.reset()
p200.tip_attached = True
p200.consolidate(
30,
plate[0:8],
plate['A2'],
new_tip='never'
)
expected = [
['Consolidating', '30'],
['Transferring', '30'],
['Aspirating', '30', 'Well A1'],
['Aspirating', '30', 'Well B1'],
['Aspirating', '30', 'Well C1'],
['Aspirating', '30', 'Well D1'],
['Aspirating', '30', 'Well E1'],
['Aspirating', '30', 'Well F1'],
['Dispensing', '180', 'Well A2'],
['Aspirating', '30', 'Well G1'],
['Aspirating', '30', 'Well H1'],
['Dispensing', '60', 'Well A2'],
]
fuzzy_assert(robot.commands(), expected=expected)
robot.clear_commands()
p200.reset()
p200.consolidate(
30,
plate,
plate[0]
)
total_aspirates = 0
for c in robot.commands():
if 'aspirating' in c.lower():
total_aspirates += 1
assert total_aspirates == 96
robot.clear_commands()
p200.reset()
p200.transfer(
30,
plate[0:8],
plate['A2']
)
expected = [
['Transferring', '30'],
['Pick'],
['Aspirating', '30', 'Well A1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well B1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well C1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well D1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well E1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well F1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well G1'],
['Dispensing', '30', 'Well A2'],
['Aspirating', '30', 'Well H1'],
['Dispensing', '30', 'Well A2'],
['Drop']
]
fuzzy_assert(robot.commands(), expected=expected)
robot.clear_commands()
@pytest.mark.api1_only
def test_transfer(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.transfer(
30,
plate[0:8],
plate[1:9],
new_tip='always',
air_gap=10,
disposal_vol=20, # ignored by transfer
touch_tip=True,
blow_out=True,
trash=True
)
expected = [
['Transferring', '30'],
['pick'],
['aspirating', '30', 'Well A1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well B1'],
['dispensing', '30', 'Well B1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well B1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well C1'],
['dispensing', '30', 'Well C1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well C1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well D1'],
['dispensing', '30', 'Well D1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well D1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well E1'],
['dispensing', '30', 'Well E1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well E1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well F1'],
['dispensing', '30', 'Well F1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well F1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well G1'],
['dispensing', '30', 'Well G1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well G1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well H1'],
['dispensing', '30', 'Well H1'],
['touch'],
['blow'],
['drop'],
['pick'],
['aspirating', '30', 'Well H1'],
['air'],
['aspirating', '10'],
['touch'],
['dispensing', '10', 'Well A2'],
['dispensing', '30', 'Well A2'],
['touch'],
['blow'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected)
robot.clear_commands()
with pytest.raises(ValueError, match='air_gap.*'):
p200.transfer(300,
plate[0],
plate[1],
air_gap=300)
with pytest.raises(ValueError, match='air_gap.*'):
p200.transfer(300,
plate[0],
plate[1],
air_gap=10000)
@pytest.mark.api1_only
def test_bad_transfer(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
with pytest.raises(ValueError):
p200.transfer(30, plate[0:2], plate[0:3])
with pytest.raises(ValueError):
p200.transfer(30, plate[0:3], plate[0:2])
with pytest.raises(RuntimeError):
p200.transfer([30, 30, 30], plate[0:2], plate[0:2])
with pytest.raises(ValueError):
p200.transfer(30, plate[0], plate[1], new_tip='sometimes')
@pytest.mark.api1_only
def test_divisible_locations(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.transfer(
100,
plate[0:4],
plate[0:2]
)
expected = [
['transferring', '100'],
['pick'],
['aspirating', '100', 'Well A1'],
['dispensing', '100', 'Well A1'],
['aspirating', '100', 'Well B1'],
['dispensing', '100', 'Well A1'],
['aspirating', '100', 'Well C1'],
['dispensing', '100', 'Well B1'],
['aspirating', '100', 'Well D1'],
['dispensing', '100', 'Well B1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected)
robot.clear_commands()
p200.reset()
p200.consolidate(
100,
plate[0:4],
plate[0:2]
)
expected = [
['consolidating', '100'],
['transferring', '100'],
['pick'],
['aspirating', '100', 'Well A1'],
['aspirating', '100', 'Well B1'],
['dispensing', '200', 'Well A1'],
['aspirating', '100', 'Well C1'],
['aspirating', '100', 'Well D1'],
['dispensing', '200', 'Well B1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected
)
robot.clear_commands()
p200.reset()
p200.distribute(
100,
plate[0:2],
plate[0:4],
disposal_vol=0
)
expected = [
['distributing', '100'],
['transferring', '100'],
['pick'],
['aspirating', '200', 'Well A1'],
['dispensing', '100', 'Well A1'],
['dispensing', '100', 'Well B1'],
['aspirating', '200', 'Well B1'],
['dispensing', '100', 'Well C1'],
['dispensing', '100', 'Well D1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected
)
robot.clear_commands()
@pytest.mark.api1_only
def test_transfer_mix(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.transfer(
200,
plate[0],
plate[1],
mix_before=(1, 10),
mix_after=(1, 10)
)
expected = [
['Transferring', '200'],
['pick'],
['mix', '10'],
['aspirating', 'Well A1'],
['dispensing'],
['aspirating', '200', 'Well A1'],
['dispensing', '200', 'Well B1'],
['mix', '10'],
['aspirating', 'Well B1'],
['dispensing'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected)
robot.clear_commands()
@pytest.mark.api1_only
def test_transfer_air_gap(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.transfer(
120,
plate[0],
plate[1],
air_gap=20
)
expected = [
['Transferring', '120'],
['pick'],
['aspirating', '120', 'Well A1'],
['air gap'],
['aspirating', '20'],
['dispensing', '20', 'Well B1'],
['dispensing', '120', 'Well B1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected)
robot.clear_commands()
@pytest.mark.api1_only
def test_consolidate_air_gap(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.consolidate(
60,
plate[0:2],
plate[2],
air_gap=20
)
expected = [
['consolidating', '60'],
['transferring', '60'],
['pick'],
['aspirating', '60', 'Well A1'],
['aspirating', '60', 'Well B1'],
['dispensing', '120', 'Well C1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected)
robot.clear_commands()
@pytest.mark.api1_only
def test_distribute_air_gap(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.distribute(
60,
plate[2],
plate[0:2],
air_gap=20
)
expected = [
['distributing', '60'],
['transferring', '60'],
['pick'],
['aspirating', '130', 'Well C1'],
['air gap'],
['aspirating', '20'],
['dispensing', '20'],
['dispensing', '60', 'Well A1'],
['air gap'],
['aspirating', '20'],
['dispensing', '20'],
['dispensing', '60', 'Well B1'],
['blow', 'Well A1'],
['drop']
]
fuzzy_assert(robot.commands(), expected=expected)
robot.clear_commands()
@pytest.mark.api1_only
def test_distribute_air_gap_and_disposal_vol(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.distribute(
60,
plate[2],
plate[0:2],
air_gap=20,
disposal_vol=20
)
expected = [
['distributing', '60'],
['transferring', '60'],
['pick'],
['aspirating', '140', 'Well C1'],
['air gap'],
['aspirating', '20'],
['dispensing', '20', 'Well A1'],
['dispensing', '60', 'Well A1'],
['air gap'],
['aspirating', '20'],
['dispensing', '20', 'Well B1'],
['dispensing', '60', 'Well B1'],
['blow', 'Well A1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected
)
robot.clear_commands()
@pytest.mark.api1_only
def test_consolidate_mix(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.consolidate(
200,
plate[0:2],
plate[2],
mix_before=(1, 10),
mix_after=(1, 10)
)
expected = [
['consolidating', '200'],
['transferring', '200'],
['pick'],
['aspirating', '200', 'Well A1'],
['dispensing', '200', 'Well C1'],
['mix', '10'],
['aspirating', 'Well C1'],
['dispensing'],
['aspirating', '200', 'Well B1'],
['dispensing', '200', 'Well C1'],
['mix', '10'],
['aspirating', 'Well C1'],
['dispensing'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected
)
robot.clear_commands()
@pytest.mark.api1_only
def test_distribute_mix(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.distribute(
200,
plate[0],
plate[1:3],
mix_before=(1, 10),
mix_after=(1, 10)
)
expected = [
['distributing', '200'],
['transferring', '200'],
['pick'],
['mix', '10'],
['aspirating', 'Well A1'],
['dispensing'],
['aspirating', '200', 'Well A1'],
['dispensing', '200', 'Well B1'],
['mix', '10'],
['aspirating', 'Well A1'],
['dispensing'],
['aspirating', '200', 'Well A1'],
['dispensing', '200', 'Well C1'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected
)
robot.clear_commands()
@pytest.mark.api1_only
def test_transfer_multichannel(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.channels = 8
p200.transfer(
200,
plate.cols[0],
plate.cols[1],
touch_tip=False,
blow_out=False,
trash=False
)
expected = [
['Transferring', '200'],
['pick'],
['aspirating', '200', 'wells A1...H1'],
['dispensing', '200', 'wells A2...H2'],
['return'],
['drop']
]
fuzzy_assert(robot.commands(),
expected=expected
)
robot.clear_commands()
@pytest.mark.api1_only
def test_transfer_single_channel(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.reset()
p200.channels = 1
p200.transfer(
200,
plate.cols('1', '2'),
plate.cols('3'),
touch_tip=False,
blow_out=False,
trash=False
)
expected = [
['Transferring', '200'],
['pick'],
['aspirating', '200', 'Well A1'],
['dispensing', '200', 'Well A3'],
['aspirating', '200', 'Well B1'],
['dispensing', '200', 'Well A3'],
['aspirating', '200', 'Well C1'],
['dispensing', '200', 'Well B3'],
['aspirating', '200', 'Well D1'],
['dispensing', '200', 'Well B3'],
['aspirating', '200', 'Well E1'],
['dispensing', '200', 'Well C3'],
['aspirating', '200', 'Well F1'],
['dispensing', '200', 'Well C3'],
['aspirating', '200', 'Well G1'],
['dispensing', '200', 'Well D3'],
['aspirating', '200', 'Well H1'],
['dispensing', '200', 'Well D3'],
['aspirating', '200', 'Well A2'],
['dispensing', '200', 'Well E3'],
['aspirating', '200', 'Well B2'],
['dispensing', '200', 'Well E3'],
['aspirating', '200', 'Well C2'],
['dispensing', '200', 'Well F3'],
['aspirating', '200', 'Well D2'],
['dispensing', '200', 'Well F3'],
['aspirating', '200', 'Well E2'],
['dispensing', '200', 'Well G3'],
['aspirating', '200', 'Well F2'],
['dispensing', '200', 'Well G3'],
['aspirating', '200', 'Well G2'],
['dispensing', '200', 'Well H3'],
['aspirating', '200', 'Well H2'],
['dispensing', '200', 'Well H3'],
['return'],
['drop']
]
fuzzy_assert(
robot.commands(),
expected=expected
)
robot.clear_commands()
@pytest.mark.api1_only
def test_touch_tip(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.pick_up_tip()
old_move = p200.robot.move_to
p200.robot.move_to = mock.Mock()
p200.touch_tip(plate[0])
p200.touch_tip(v_offset=-3)
p200.touch_tip(plate[1], radius=0.5)
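# The expected calls below encode the assumed touch_tip geometry: an arc move
# to the well centre at the requested v_offset, followed by direct moves to
# the +x, -x, +y and -y edge points; `v_offset` only changes the height
# (9.50 vs 7.50 here) and `radius` scales how far towards the well wall the
# edge moves go (radius=0.5 gives 4.80/1.60 instead of 6.40/0.00).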
expected = [
mock.call(
(plate[0], (3.20, 3.20, 9.50)),
instrument=p200,
strategy='arc'),
mock.call(
(plate[0], (6.40, 3.20, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (0.00, 3.20, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (3.20, 6.40, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (3.20, 0.00, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (3.20, 3.20, 7.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (6.40, 3.20, 7.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (0.00, 3.20, 7.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (3.20, 6.40, 7.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[0], (3.20, 0.00, 7.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[1], (3.20, 3.20, 9.50)),
instrument=p200,
strategy='arc'),
mock.call(
(plate[1], (4.80, 3.20, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[1], (1.60, 3.20, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[1], (3.20, 4.80, 9.50)),
instrument=p200,
strategy='direct'),
mock.call(
(plate[1], (3.20, 1.60, 9.50)),
instrument=p200,
strategy='direct')
]
assert expected == p200.robot.move_to.mock_calls
p200.robot.move_to = old_move
@pytest.mark.api1_only
def test_mix(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
# It is necessary to aspirate before it is mocked out
# so that you have liquid
p200.pick_up_tip()
p200.aspirate = mock.Mock()
p200.dispense = mock.Mock()
# scenario I: 3 arguments - repetitions, volume, location
p200.mix(3, 100, plate[1])
dispense_expected_1 = [
mock.call.dispense(100, rate=1.0),
mock.call.dispense(100, rate=1.0),
mock.call.dispense(100, rate=1.0)
]
assert p200.dispense.mock_calls == dispense_expected_1
aspirate_expected_1 = [
mock.call.aspirate(volume=100, location=plate[1], rate=1.0),
mock.call.aspirate(100, rate=1.0),
mock.call.aspirate(100, rate=1.0)
]
assert p200.aspirate.mock_calls == aspirate_expected_1
# scenario II: 2 arguments - repetitions, volume
p200.aspirate.reset_mock()
p200.dispense.reset_mock()
p200.mix(2, 100)
dispense_expected_2 = [
mock.call.dispense(100, rate=1.0),
mock.call.dispense(100, rate=1.0)
]
assert p200.dispense.mock_calls == dispense_expected_2
aspirate_expected_2 = [
mock.call.aspirate(volume=100, location=None, rate=1.0),
mock.call.aspirate(100, rate=1.0)
]
assert p200.aspirate.mock_calls == aspirate_expected_2
# scenario III: 2 arguments - repetitions, location
p200.aspirate.reset_mock()
p200.dispense.reset_mock()
p200.mix(2, plate[2])
dispense_expected_3 = [
mock.call.dispense(200, rate=1.0),
mock.call.dispense(200, rate=1.0)
]
assert p200.dispense.mock_calls == dispense_expected_3
aspirate_expected_3 = [
mock.call.aspirate(volume=200, location=plate[2], rate=1.0),
mock.call.aspirate(200, rate=1.0)
]
assert p200.aspirate.mock_calls == aspirate_expected_3
# scenario IV: 0 arguments
p200.aspirate.reset_mock()
p200.dispense.reset_mock()
p200.mix()
dispense_expected_3 = [
mock.call.dispense(200, rate=1.0)
]
assert p200.dispense.mock_calls == dispense_expected_3
aspirate_expected_3 = [
mock.call.aspirate(volume=200, location=None, rate=1.0),
]
assert p200.aspirate.mock_calls == aspirate_expected_3
@pytest.mark.api1_only
def test_air_gap(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.pick_up_tip()
p200.aspirate(50, plate[0])
p200.air_gap()
assert p200.current_volume == 200
p200.dispense()
p200.aspirate(50, plate[1])
p200.air_gap(10)
assert p200.current_volume == 60
p200.dispense()
p200.aspirate(50, plate[2])
p200.air_gap(10, 10)
assert p200.current_volume == 60
p200.dispense()
p200.aspirate(50, plate[2])
p200.air_gap(0)
assert p200.current_volume == 50
@pytest.mark.api1_only
def test_pipette_home(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.home()
assert len(robot.commands()) == 1
@pytest.mark.api1_only
def test_mix_with_named_args(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.current_volume = 100
p200.pick_up_tip()
p200.aspirate = mock.Mock()
p200.dispense = mock.Mock()
p200.mix(volume=50, repetitions=2)
assert \
p200.dispense.mock_calls == \
[
mock.call.dispense(50, rate=1.0),
mock.call.dispense(50, rate=1.0)
]
assert \
p200.aspirate.mock_calls == \
[
mock.call.aspirate(volume=50,
location=None,
rate=1.0),
mock.call.aspirate(50, rate=1.0)
]
@pytest.mark.api1_only
def test_tip_tracking_simple(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.move_to = mock.Mock()
p200.pick_up_tip()
p200.tip_attached = False # prior expectation, for test only
p200.pick_up_tip()
assert p200.move_to.mock_calls == \
build_pick_up_tip(p200, tiprack1[0]) + \
build_pick_up_tip(p200, tiprack1[1])
@pytest.mark.api1_only
def test_simulate_plunger_while_enqueing(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.pick_up_tip()
assert p200.current_volume == 0
p200.aspirate(200)
assert p200.current_volume == 200
p200.dispense(20)
assert p200.current_volume == 180
p200.dispense(20)
assert p200.current_volume == 160
p200.dispense(60)
assert p200.current_volume == 100
p200.dispense(100)
assert p200.current_volume == 0
p200.drop_tip()
@pytest.mark.api1_only
def test_tip_tracking_chain(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
# TODO (ben 20171130): revise this test to make more sense in the
# context of required tip pick_up/drop sequencing, etc.
total_tips_per_plate = 4
tiprack1 = generate_plate(
total_tips_per_plate, 2, (5, 5), (0, 0), 5)
tiprack2 = generate_plate(
total_tips_per_plate, 2, (5, 5), (0, 0), 5)
robot._deck['1'].add(tiprack1, 'tiprack1')
robot._deck['2'].add(tiprack2, 'tiprack2')
p200 = Pipette(
robot,
mount='right',
tip_racks=[tiprack1, tiprack2],
trash_container=tiprack1,
name='pipette-for-transfer-tests',
max_volume=200,
ul_per_mm=18.5
)
p200.move_to = mock.Mock()
for _ in range(0, total_tips_per_plate * 2):
p200.pick_up_tip()
p200.tip_attached = False # prior expectation, for test only
expected = []
for i in range(0, total_tips_per_plate):
expected.extend(build_pick_up_tip(p200, tiprack1[i]))
for i in range(0, total_tips_per_plate):
expected.extend(build_pick_up_tip(p200, tiprack2[i]))
assert p200.move_to.mock_calls == expected
# test that when we go over the total number of tips,
# Pipette raises a RuntimeWarning
robot.clear_commands()
p200.reset()
for _ in range(0, total_tips_per_plate * 2):
p200.pick_up_tip()
p200.tip_attached = False # prior expectation, for test only
with pytest.raises(RuntimeWarning):
p200.pick_up_tip()
@pytest.mark.api1_only
def test_tip_tracking_chain_multi_channel(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
# TODO (ben 20171130): revise this test to make more sense in the
# context of required tip pick_up/drop sequencing, etc.
p200_multi = Pipette(
robot,
trash_container=trash,
tip_racks=[tiprack1, tiprack2],
max_volume=200,
min_volume=10, # These are variable
mount='right',
channels=8,
ul_per_mm=18.5
)
p200_multi.calibrate_plunger(
top=0, bottom=10, blow_out=12, drop_tip=13)
p200_multi.move_to = mock.Mock()
for _ in range(0, 12 * 2):
p200_multi.pick_up_tip()
p200_multi.tip_attached = False # prior expectation, for test only
expected = []
for i in range(0, 12):
expected.extend(
build_pick_up_tip(p200_multi, tiprack1.cols[i]))
for i in range(0, 12):
expected.extend(
build_pick_up_tip(p200_multi, tiprack2.cols[i]))
assert p200_multi.move_to.mock_calls == expected
@pytest.mark.api1_only
def test_tip_tracking_start_at_tip(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
p200.start_at_tip(tiprack1['B2'])
p200.pick_up_tip()
assert tiprack1['B2'] == p200.current_tip()
@pytest.mark.api1_only
def test_tip_tracking_return(local_test_pipette):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
# Note: because this test mocks out `drop_tip`, as a side-effect
# `tip_attached` must be manually set as it would be under the
# `return_tip` callstack, making this test somewhat fragile
p200.drop_tip = mock.Mock()
p200.pick_up_tip()
p200.return_tip()
p200.tip_attached = False
p200.pick_up_tip()
p200.return_tip()
expected = [
mock.call(tiprack1[0], home_after=True),
mock.call(tiprack1[1], home_after=True)
]
assert p200.drop_tip.mock_calls == expected
@pytest.mark.api1_only
def test_direct_movement_within_well(local_test_pipette, robot):
trash, tiprack1, tiprack2, plate, p200 = local_test_pipette
old_move = robot.move_to
robot.move_to = mock.Mock()
p200.move_to(plate[0])
p200.move_to(plate[0].top())
p200.move_to(plate[0].bottom())
p200.move_to(plate[1])
p200.move_to(plate[2])
p200.move_to(plate[2].bottom())
expected = [
mock.call(
plate[0], instrument=p200, strategy='arc'),
mock.call(
plate[0].top(), instrument=p200, strategy='direct'),
mock.call(
plate[0].bottom(), instrument=p200, strategy='direct'),
mock.call(
plate[1], instrument=p200, strategy='arc'),
mock.call(
plate[2], instrument=p200, strategy='arc'),
mock.call(
plate[2].bottom(), instrument=p200, strategy='direct')
]
assert robot.move_to.mock_calls == expected
robot.move_to = old_move
def build_pick_up_tip(pipette, well):
return [
mock.call(well.top()),
mock.call(
well.top(-pipette._pick_up_distance), strategy='direct'),
mock.call(well.top(), strategy='direct'),
mock.call(
well.top(-pipette._pick_up_distance - 1), strategy='direct'),
mock.call(well.top(), strategy='direct'),
mock.call(
well.top(-pipette._pick_up_distance - 2), strategy='direct'),
mock.call(well.top(), strategy='direct')
]
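# Editorial note (not part of the original test module): `fuzzy_assert` is
# defined elsewhere in this suite. The expected lists above are written as one
# entry per emitted command, each entry holding substrings that must appear in
# that command. A minimal sketch of that assumed contract, for illustration
# only (the real helper may differ):
def _fuzzy_assert_sketch(result, expected):
    assert len(result) == len(expected), \
        'got %d commands, expected %d' % (len(result), len(expected))
    for command, tokens in zip(result, expected):
        for token in tokens:
            assert str(token).lower() in command.lower(), \
                '%r not found in %r' % (token, command)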
|
|
#!/usr/bin/env python3
import unittest
import sparse_binary_number
class TestSparseBinaryNumber(unittest.TestCase):
def setUp(self):
self.sparse_numbers = [
0b00000000,
0b00000001,
0b00000010,
0b00000100,
0b00000101,
0b00001000,
0b00001001,
0b00001010,
0b00010000,
0b00010001,
0b00010010,
0b00010100,
0b00010101,
0b00100000,
0b00100001,
0b00100010,
0b00100100,
0b00100101,
0b00101000,
0b00101001,
0b00101010,
0b01000000,
0b01000001,
0b01000010,
0b01000100,
0b01000101,
0b01001000,
0b01001001,
0b01001010,
0b01010000,
0b01010001,
0b01010010,
0b01010100,
0b01010101,
0b10000000
]
def test_bit_at_twos_power(self):
# expected, number, twos_power
number = 0b11001101
self.assertEqual(1, sparse_binary_number.bit_at_twos_power(number, 0))
self.assertEqual(0, sparse_binary_number.bit_at_twos_power(number, 1))
self.assertEqual(1, sparse_binary_number.bit_at_twos_power(number, 2))
self.assertEqual(1, sparse_binary_number.bit_at_twos_power(number, 3))
self.assertEqual(0, sparse_binary_number.bit_at_twos_power(number, 4))
self.assertEqual(0, sparse_binary_number.bit_at_twos_power(number, 5))
self.assertEqual(1, sparse_binary_number.bit_at_twos_power(number, 6))
self.assertEqual(1, sparse_binary_number.bit_at_twos_power(number, 7))
def test_bit_at_twos_power_leading_zeros(self):
# expected, number, twos_power
number = 0b01
self.assertEqual(1, sparse_binary_number.bit_at_twos_power(number, 0))
self.assertEqual(0, sparse_binary_number.bit_at_twos_power(number, 1))
def test_bit_at_twos_power_higher_than_most_significant_bit(self):
self.assertEqual(0, sparse_binary_number.bit_at_twos_power(0b01, 8))
self.assertEqual(0, sparse_binary_number.bit_at_twos_power(0b01, 12))
###############################
# examine sparse number sequence
# to help formulate a sparse number generator algorithm
def sequence_difference(self):
difference_list = []
for index in range(1, len(self.sparse_numbers)):
difference = self.sparse_numbers[index]
difference_list.append(difference)
return difference_list
def test_sequence_difference(self):
difference_list = self.sequence_difference()
expected = [
1,
2,
4, 5,
8, 9, 10,
16, 17, 18, 20, 21,
32, 33, 34, 36, 37, 40, 41, 42,
64, 65, 66, 68, 69, 72, 73, 74, 80, 81, 82, 84, 85,
128
]
self.assertEqual(expected, difference_list)
# expand elements as powers of 2 to make pattern clearer
expected_expanded = [
1,
2,
4, 4 + 1,
8, 8 + 1, 8 + 2,
16, 16 + 1, 16 + 2, 16 + 4, 16 + 4 + 1,
32, 32 + 1, 32 + 2, 32 + 4, 32 + 4 + 1,
32 + 8, 32 + 8 + 1, 32 + 8 + 2,
64, 64 + 1, 64 + 2, 64 + 4, 64 + 4 + 1,
64 + 8, 64 + 8 + 1, 64 + 8 + 2,
64 + 16, 64 + 16 + 1, 64 + 16 + 2, 64 + 16 + 4, 64 + 16 + 4 + 1,
128
]
self.assertEqual(expected_expanded, difference_list)
###############################
def test_bits_list_0(self):
self.assertEqual([0], sparse_binary_number.bits_list(0))
def test_bits_list_1(self):
self.assertEqual([1], sparse_binary_number.bits_list(0b1))
def test_bits_list_3(self):
self.assertEqual([1, 1], sparse_binary_number.bits_list(0b11))
def test_bits_list_111000101101(self):
self.assertEqual([1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1],
sparse_binary_number.bits_list(0b111000101101))
def test_is_sparse_0(self):
self.assertEqual(True, sparse_binary_number.is_sparse(0))
def test_is_sparse_0b1(self):
self.assertEqual(True, sparse_binary_number.is_sparse(0b1))
def test_is_sparse_trailing_ones(self):
self.assertEqual(False, sparse_binary_number.is_sparse(0b000011))
def test_is_sparse_middle_ones(self):
self.assertEqual(False, sparse_binary_number.is_sparse(0b001100))
def test_is_sparse_leading_ones(self):
self.assertEqual(False, sparse_binary_number.is_sparse(0b110000))
def test_is_sparse(self):
test_data = [
(0, True),
(0b1, True),
(0b10, True),
(0b11, False),
(0b100, True),
(0b101, True),
(5, True),
(0b110, False),
(0b1010010010001010001, True),
(0b1100010010001010001, False)
]
for (number, expected) in test_data:
self.assertEqual(expected, sparse_binary_number.is_sparse(number),
"expected {0} for number 0b{1:b}"
.format(str(expected), number))
def test_next_sparse(self):
for index in range(1, len(self.sparse_numbers)):
number = self.sparse_numbers[index - 1]
actual = sparse_binary_number.next_sparse(number)
expected = self.sparse_numbers[index]
self.assertEqual(expected, actual,
"expected 0b{0:b} for next_sparse(0b{1:b}) but got 0b{2:b}"
.format(expected, number, actual))
def test_next_sparse_incremental(self):
for index in range(1, len(self.sparse_numbers)):
number = self.sparse_numbers[index - 1]
actual = sparse_binary_number.next_sparse_incremental(number)
expected = self.sparse_numbers[index]
self.assertEqual(expected, actual,
"expected 0b{0:b} for next_sparse_incremental(0b{1:b}) but got 0b{2:b}"
.format(expected, number, actual))
def test_next_sparse_incremental_limit_returns_none(self):
test_data = [
(2 ** 32 - 1, None),
(2 ** 32, None),
(2 ** 32 + 1, None)
]
for (number, expected) in test_data:
actual = sparse_binary_number.next_sparse_incremental(number)
self.assertEqual(expected, actual,
"expected {0} for next_sparse_incremental(0b{1:b})"
.format(str(expected), number))
def test_twos_power_max(self):
self.assertEqual(0, sparse_binary_number.twos_power_max(0))
self.assertEqual(0, sparse_binary_number.twos_power_max(0b1))
self.assertEqual(0, sparse_binary_number.twos_power_max(0b01))
self.assertEqual(1, sparse_binary_number.twos_power_max(0b10))
self.assertEqual(1, sparse_binary_number.twos_power_max(0b11))
self.assertEqual(3, sparse_binary_number.twos_power_max(0b01011))
def test_is_zero_bit_and_no_neighbor_ones(self):
self.assertEqual(True,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b0, 0))
self.assertEqual(False,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b1, 0))
self.assertEqual(False,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b001, 1))
self.assertEqual(False,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b10, 0))
self.assertEqual(False,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b10, 1))
self.assertEqual(False,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b010, 1))
self.assertEqual(True,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b100, 0))
self.assertEqual(True,
sparse_binary_number.is_zero_bit_and_no_neighbor_ones(0b1010001, 2))
def test_is_bit_no_neighbor_ones(self):
self.assertEqual(True,
sparse_binary_number.is_bit_no_neighbor_ones(0b0, 0))
self.assertEqual(True,
sparse_binary_number.is_bit_no_neighbor_ones(0b1, 0))
self.assertEqual(False,
sparse_binary_number.is_bit_no_neighbor_ones(0b10, 0))
self.assertEqual(True,
sparse_binary_number.is_bit_no_neighbor_ones(0b10, 1))
self.assertEqual(True,
sparse_binary_number.is_bit_no_neighbor_ones(0b010, 1))
self.assertEqual(False,
sparse_binary_number.is_bit_no_neighbor_ones(0b001, 1))
self.assertEqual(False,
sparse_binary_number.is_bit_no_neighbor_ones(0b100, 1))
def test_is_bit_no_right_one(self):
self.assertEqual(True, sparse_binary_number.is_bit_no_right_one(0b0, 0))
self.assertEqual(True, sparse_binary_number.is_bit_no_right_one(0b1, 0))
self.assertEqual(False,
sparse_binary_number.is_bit_no_right_one(0b01, 1))
self.assertEqual(True,
sparse_binary_number.is_bit_no_right_one(0b10, 1))
def test_is_bit_no_left_one(self):
self.assertEqual(True, sparse_binary_number.is_bit_no_left_one(0b0, 0))
self.assertEqual(True, sparse_binary_number.is_bit_no_left_one(0b1, 0))
self.assertEqual(False,
sparse_binary_number.is_bit_no_left_one(0b10, 0))
self.assertEqual(True,
sparse_binary_number.is_bit_no_left_one(0b10, 1))
self.assertEqual(True,
sparse_binary_number.is_bit_no_left_one(0b11, 1))
self.assertEqual(False,
sparse_binary_number.is_bit_no_left_one(0b101, 1))
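# Editorial note (not the module under test): the behaviour exercised above is
# consistent with "sparse" meaning no two adjacent 1-bits. One compact way to
# check that, shown purely for illustration, is a single shift-and-AND:
def _is_sparse_sketch(number):
    # For sparse numbers no neighbouring bits are both set, so the AND of the
    # number with its own right-shift is exactly zero.
    return number & (number >> 1) == 0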
if __name__ == "__main__":
unittest.main()
|
|
import threading
import zmq
import logging
import time
import socket
__all__ = ['EventPublisher', 'EventSubscriber', 'EventMessage', 'EventCodes']
class EventPublisher(object):
"""
Event broadcast class for Labtronyx
:param port: Port to bind for event notifications
:type port: int
"""
HEARTBEAT_FREQ = 60.0 # Send heartbeat once per minute
def __init__(self, port):
self.port = port
self._zmq_context = zmq.Context()
self._zmq_socket = None
self._server_alive = threading.Event()
self._server_alive.clear()
def start(self):
# Start ZMQ Event publisher
self._zmq_socket = self._zmq_context.socket(zmq.PUB)
self._zmq_socket.bind("tcp://*:{}".format(self.port))
# Start heartbeat server
heartbeat_srv = threading.Thread(name='Labtronyx-Heartbeat-Server', target=self._heartbeat_server)
heartbeat_srv.setDaemon(True)
heartbeat_srv.start()
def _heartbeat_server(self):
last_heartbeat = 0.0
self._server_alive.set()
while self._server_alive.is_set():
if time.time() - last_heartbeat > self.HEARTBEAT_FREQ:
self.publishEvent(EventCodes.manager.heartbeat)
last_heartbeat = time.time()
time.sleep(0.5) # Low sleep time to ensure we shut down in a timely manner
def stop(self):
# Stop heartbeat server
self._server_alive.clear()
# Close ZMQ socket
if self._zmq_socket is not None:
self._zmq_socket.close()
self._zmq_socket = None
def publishEvent(self, event, *args, **kwargs):
if self._zmq_socket is not None:
self._zmq_socket.send_json({
'labtronyx-event': '1.0',
'hostname': socket.gethostname(),
'event': str(event),
'args': args,
'params': kwargs
})
class EventSubscriber(object):
"""
Subscribe to events broadcast by the Labtronyx Server. Run asynchronously in a separate thread to prevent the need
for continuous polling. Use `connect` to listen for notifications from a remote server. A single `EventSubscriber`
object can listen to multiple servers.
"""
ZMQ_PORT = 6781
POLL_TIME = 100 # ms
def __init__(self, **kwargs):
self.logger = kwargs.get('logger', logging)
self._callbacks = {}
self._client_alive = threading.Event()
# Create ZMQ context and socket
self._context = zmq.Context()
self._socket = self._context.socket(zmq.SUB)
self._socket.setsockopt(zmq.SUBSCRIBE, '')
# Start client thread
self._client_thread = threading.Thread(name='EventSubscriber', target=self._client)
self._client_thread.start()
# Give the thread time to start up
self._client_alive.wait(1.0)
def __del__(self):
self.stop()
def _client(self):
self._client_alive.set()
while self._client_alive.is_set():
in_waiting = self._socket.poll(self.POLL_TIME)
if in_waiting > 0:
for idx in range(in_waiting):
msg = self._socket.recv_json()
msg_obj = EventMessage(msg)
self.logger.debug("Received event: %s", msg_obj.event)
self.handleMsg(msg_obj)
self._socket.close()
def connect(self, host):
"""
Connect to a remote event publisher
:param host: Hostname or IP Address of remote host
:type host: str
"""
uri = "tcp://{}:{}".format(host, self.ZMQ_PORT)
self._socket.connect(uri)
def disconnect(self, host):
"""
Disconnect from a remote event publisher
:param host: Hostname or IP Address of remote host
:type host: str
"""
uri = "tcp://{}:{}".format(host, self.ZMQ_PORT)
self._socket.disconnect(uri)
def handleMsg(self, event):
"""
Default message handler. Dispatches events to registered callbacks. Override in subclasses to change how
messages are dispatched.
:param event: Event
:type event: EventMessage object
"""
code = event.event
if code in self._callbacks:
self._callbacks.get(code)(event)
else:
if '' in self._callbacks:
self._callbacks.get('')(event)
def registerCallback(self, event, cb_func):
"""
Register a function to be called when a particular event is received. An event of `''` will register a default
callback
:param event: Event to register
:type event: str
:param cb_func: Function which takes parameters `event` (str) and `args` (dict)
:type cb_func: method
"""
self._callbacks[event] = cb_func
def stop(self):
"""
Convenience function to stop the subscriber thread. Thread will automatically stop before garbage collection.
"""
self._client_alive.clear()
class EventMessage(object):
def __init__(self, json_msg):
self.version = json_msg.get('labtronyx-event')
self.hostname = json_msg.get('hostname')
self.event = json_msg.get('event')
self.args = json_msg.get('args', [])
self.params = json_msg.get('params', {})
def __len__(self):
return len(self.args)
def __getitem__(self, item):
return self.args[item]
def __getattr__(self, item):
return self.params.get(item)
class EventCodes:
class manager:
shutdown = "manager.shutdown"
heartbeat = "manager.heartbeat"
class interface:
created = "interface.created"
destroyed = "interface.destroyed"
changed = "interface.changed"
class resource:
created = "resource.created"
destroyed = "resource.destroyed"
changed = "resource.changed"
driver_loaded = "resource.driver.loaded"
driver_unloaded = "resource.driver.unloaded"
class script:
created = "script.created"
changed = "script.changed"
destroyed = "script.destroyed"
finished = "script.finished"
log = "script.log"
|
|
# -*- coding: utf-8 -*-
# This file is part of RRMPG.
#
# RRMPG is free software with the aim to provide a playground for experiments
# with hydrological rainfall-runoff-models while achieving competitive
# performance results.
#
# You should have received a copy of the MIT License along with RRMPG. If not,
# see <https://opensource.org/licenses/MIT>
"""Interface to the the ABC-Model."""
import numbers
import numpy as np
from scipy import optimize
from .basemodel import BaseModel
from .abcmodel_model import run_abcmodel
from ..utils.metrics import calc_mse
from ..utils.array_checks import check_for_negatives, validate_array_input
class ABCModel(BaseModel):
"""Interface to the the ABC-Model.
This model implements the classical ABC-Model. It was developed for
educational purpose and represents a simple linear model.
Original Publication:
Myron B. Fiering "Streamflow synthesis" Cambridge, Harvard University
Press, 1967. 139 P. (1967).
If no model parameters are passed upon initialization, generates random
parameter set.
Args:
params: (optional) Dictonary containing all model parameters as a
seperate key/value pairs.
"""
# List of model parameters
_param_list = ['a', 'b', 'c']
# Dictionary with default parameter bounds
_default_bounds = {'a': (0, 1),
'b': (0, 1),
'c': (0, 1)}
# Custom numpy datatype needed for numba input
_dtype = np.dtype([('a', np.float64),
('b', np.float64),
('c', np.float64)])
def __init__(self, params=None):
"""Initialize an ABC-Model.
If no parameters are passed as input arguments, random values are
sampled that satisfy the parameter constraints of the ABC-Model.
Args:
params: (optional) Dictionary containing all model parameters as
separate key/value pairs.
"""
super().__init__(params=params)
def get_random_params(self, num=1):
"""Generate random sets of model parameters for the ABC-model.
The ABC-model has specific parameter constraints, therefore we override
the BaseModel implementation to generate random model parameters that
satisfy the ABC-Model constraints.
Args:
num: (optional) Integer, specifying the number of parameter sets,
that will be generated. Default is 1.
Returns:
A dict containing one key/value pair for each model parameter.
"""
params = np.zeros(num, dtype=self._dtype)
# sample parameter 'a' between the bounds [0,1]
params['a'][:] = np.random.uniform(low=self._default_bounds['a'][0],
high=self._default_bounds['a'][1],
size=num)
# parameter 'c' must be between [0,1] and has no further constraints
params['c'][:] = np.random.uniform(low=self._default_bounds['c'][0],
high=self._default_bounds['c'][1],
size=num)
# Parameter b is constrained by its corresponding a parameter.
for i in range(num):
# sample parameter 'b' between lower bound 0 and upper bnd (1 - a)
params['b'][i] = np.random.uniform(low=self._default_bounds['b'][0],
high=(1-params['a'][i]),
size=1)
return params
def simulate(self, prec, initial_state=0, return_storage=False,
params=None):
"""Simulate the streamflow for the passed precipitation.
This function makes sanity checks on the input and then calls the
externally defined ABC-Model function.
Args:
prec: Precipitation data for each timestep. Can be a List, numpy
array or pandas.Series
initial_state: (optional) Initial value for the storage.
return_storage: (optional) Boolean, whether or not to return the
simulated storage for each timestep.
params: (optional) Numpy array of parameter sets, that will be
evaluated at once in parallel. Must be of the model's own custom
data type. If nothing is passed, the parameters stored in the
model object will be used.
Returns:
An array with the simulated stream flow for each timestep and
optionally an array with the simulated storage.
Raises:
ValueError: If one of the inputs contains invalid values.
TypeError: If one of the inputs has an incorrect datatype.
"""
# Validation check of the precipitation input
prec = validate_array_input(prec, np.float64, 'precipitation')
# Check if there exist negative precipitation
if check_for_negatives(prec):
raise ValueError("In the precipitation array are negative values.")
# Validation check of the initial state
if not isinstance(initial_state, numbers.Number) or initial_state < 0:
msg = ["The variable 'initial_state' must be a numercial scaler ",
"greate than 0."]
raise TypeError("".join(msg))
# Cast initial state as float
initial_state = float(initial_state)
# Validation check of the return_storage
if not isinstance(return_storage, bool):
raise TypeError("The return_storage arg must be a boolean.")
# If no parameters were passed, prepare array w. params from attributes
if params is None:
params = np.zeros(1, dtype=self._dtype)
for param in self._param_list:
params[param] = getattr(self, param)
# Else, check the param input for correct datatype
else:
if params.dtype != self._dtype:
msg = ["The model parameters must be a numpy array of the ",
"models own custom data type."]
raise TypeError("".join(msg))
# if only one parameter set is passed, expand dimensions to 1D
if isinstance(params, np.void):
params = np.expand_dims(params, params.ndim)
# Create output arrays
qsim = np.zeros((prec.shape[0], params.size), np.float64)
if return_storage:
storage = np.zeros((prec.shape[0], params.size), np.float64)
# call simulation function for each parameter set
for i in range(params.size):
# Call ABC-model simulation function and return results
if return_storage:
qsim[:,i], storage[:,i] = run_abcmodel(prec, initial_state,
params[i])
else:
qsim[:,i], _ = run_abcmodel(prec, initial_state, params[i])
if return_storage:
return qsim, storage
else:
return qsim
def fit(self, qobs, prec, initial_state=0):
"""Fit the model to a timeseries of discharge using.
This functions uses scipy's global optimizer (differential evolution)
to find a good set of parameters for the model, so that the observed
discharge is simulated as good as possible.
Args:
qobs: Array of observed streaflow discharge.
prec: Array of precipitation data.
initial_state: (optional) Initial value for the storage.
Returns:
res: A scipy OptimizeResult class object.
Raises:
ValueError: If one of the inputs contains invalid values.
TypeError: If one of the inputs has an incorrect datatype.
"""
# Validation check of the inputs
qobs = validate_array_input(qobs, np.float64, 'qobs')
prec = validate_array_input(prec, np.float64, 'precipitation')
# Check if there exist negative precipitation
if check_for_negatives(prec):
raise ValueError("In the precipitation array are negative values.")
# Validation check of the initial state
if not isinstance(initial_state, numbers.Number) or initial_state < 0:
msg = ["The variable 'initial_state' must be a numercial scaler ",
"greate than 0."]
raise TypeError("".join(msg))
# Cast initial state as float
initial_state = float(initial_state)
# pack input arguments for scipy optimizer
args = (prec, initial_state, qobs, self._dtype)
bnds = tuple([self._default_bounds[p] for p in self._param_list])
# call the actual optimizer function
res = optimize.differential_evolution(_loss, bounds=bnds, args=args)
return res
def _loss(X, *args):
"""Return the loss value for the current parameter set."""
# Unpack static arguments
prec = args[0]
initial_state = args[1]
qobs = args[2]
dtype = args[3]
# Create a custom numpy array of the model parameters
params = np.zeros(1, dtype=dtype)
params['a'] = X[0]
params['b'] = X[1]
params['c'] = X[2]
# Calculate the simulated streamflow
qsim, _ = run_abcmodel(prec, initial_state, params[0])
# Calculate the Mean-Squared-Error as optimization criterion
loss_value = calc_mse(qobs, qsim)
return loss_value
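def _example_usage():
    """Editorial usage sketch (not part of the original module).
    Illustrates the intended calling pattern with synthetic precipitation as a
    stand-in for real forcing data; array sizes and values are assumptions.
    """
    model = ABCModel()
    prec = np.random.uniform(low=0.0, high=10.0, size=1000)
    # Evaluate several random parameter sets at once ...
    params = model.get_random_params(num=10)
    qsim = model.simulate(prec, initial_state=0, params=params)
    # ... or calibrate against an observed discharge series (here the first
    # simulated column is reused as a synthetic observation).
    qobs = qsim[:, 0]
    return model.fit(qobs, prec)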
|
|
from mongoengine import ReferenceField, ListField
from mongoengine.base import BaseDocument, DocumentMetaclass, get_document
from wtforms import fields, validators
from flask_mongoengine.wtf import orm, fields as mongo_fields
from flask_admin import form
from flask_admin.model.form import FieldPlaceholder
from flask_admin.model.fields import InlineFieldList, AjaxSelectField, AjaxSelectMultipleField
from flask_admin.model.widgets import InlineFormWidget
from flask_admin._compat import iteritems
from .fields import ModelFormField, MongoFileField, MongoImageField
from .subdoc import EmbeddedForm
class CustomModelConverter(orm.ModelConverter):
"""
Customized MongoEngine form conversion class.
Injects various Flask-Admin widgets and handles lists with
customized InlineFieldList field.
"""
def __init__(self, view):
super(CustomModelConverter, self).__init__()
self.view = view
def _get_field_override(self, name):
form_overrides = getattr(self.view, 'form_overrides', None)
if form_overrides:
return form_overrides.get(name)
return None
def _get_subdocument_config(self, name):
config = getattr(self.view, '_form_subdocuments', {})
p = config.get(name)
if not p:
return EmbeddedForm()
return p
def _convert_choices(self, choices):
for c in choices:
if isinstance(c, tuple):
yield c
else:
yield (c, c)
def clone_converter(self, view):
return self.__class__(view)
def convert(self, model, field, field_args):
# Check if it is overridden field
if isinstance(field, FieldPlaceholder):
return form.recreate_field(field.field)
kwargs = {
'label': getattr(field, 'verbose_name', field.name),
'description': field.help_text or '',
'validators': [],
'filters': [],
'default': field.default
}
if field_args:
kwargs.update(field_args)
if field.required:
kwargs['validators'].append(validators.Required())
elif not isinstance(field, ListField):
kwargs['validators'].append(validators.Optional())
ftype = type(field).__name__
if field.choices:
kwargs['choices'] = list(self._convert_choices(field.choices))
if ftype in self.converters:
kwargs["coerce"] = self.coerce(ftype)
if kwargs.pop('multiple', False):
return fields.SelectMultipleField(**kwargs)
return fields.SelectField(**kwargs)
ftype = type(field).__name__
if hasattr(field, 'to_form_field'):
return field.to_form_field(model, kwargs)
override = self._get_field_override(field.name)
if override:
return override(**kwargs)
if ftype in self.converters:
return self.converters[ftype](model, field, kwargs)
@orm.converts('DateTimeField')
def conv_DateTime(self, model, field, kwargs):
kwargs['widget'] = form.DateTimePickerWidget()
return orm.ModelConverter.conv_DateTime(self, model, field, kwargs)
@orm.converts('ListField')
def conv_List(self, model, field, kwargs):
if field.field is None:
raise ValueError('ListField "%s" must have field specified for model %s' % (field.name, model))
if isinstance(field.field, ReferenceField):
loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)
if loader:
return AjaxSelectMultipleField(loader, **kwargs)
kwargs['widget'] = form.Select2Widget(multiple=True)
# TODO: Support AJAX multi-select
doc_type = field.field.document_type
return mongo_fields.ModelSelectMultipleField(model=doc_type, **kwargs)
# Create converter
view = self._get_subdocument_config(field.name)
converter = self.clone_converter(view)
if field.field.choices:
kwargs['multiple'] = True
return converter.convert(model, field.field, kwargs)
unbound_field = converter.convert(model, field.field, {})
return InlineFieldList(unbound_field, min_entries=0, **kwargs)
@orm.converts('EmbeddedDocumentField')
def conv_EmbeddedDocument(self, model, field, kwargs):
# FormField does not support validators
kwargs['validators'] = []
view = self._get_subdocument_config(field.name)
form_opts = form.FormOpts(widget_args=getattr(view, 'form_widget_args', None),
form_rules=view._form_rules)
form_class = view.get_form()
if form_class is None:
converter = self.clone_converter(view)
form_class = get_form(field.document_type_obj, converter,
base_class=view.form_base_class or form.BaseForm,
only=view.form_columns,
exclude=view.form_excluded_columns,
field_args=view.form_args,
extra_fields=view.form_extra_fields)
form_class = view.postprocess_form(form_class)
return ModelFormField(field.document_type_obj, view, form_class, form_opts=form_opts, **kwargs)
@orm.converts('ReferenceField')
def conv_Reference(self, model, field, kwargs):
kwargs['allow_blank'] = not field.required
loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)
if loader:
return AjaxSelectField(loader, **kwargs)
kwargs['widget'] = form.Select2Widget()
return orm.ModelConverter.conv_Reference(self, model, field, kwargs)
@orm.converts('FileField')
def conv_File(self, model, field, kwargs):
return MongoFileField(**kwargs)
@orm.converts('ImageField')
def conv_image(self, model, field, kwargs):
return MongoImageField(**kwargs)
def get_form(model, converter,
base_class=form.BaseForm,
only=None,
exclude=None,
field_args=None,
extra_fields=None):
"""
Create a wtforms Form for a given mongoengine Document schema::
from flask_mongoengine.wtf import model_form
from myproject.myapp.schemas import Article
ArticleForm = model_form(Article)
:param model:
A mongoengine Document schema class
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
if isinstance(model, str):
model = get_document(model)
if not isinstance(model, (BaseDocument, DocumentMetaclass)):
raise TypeError('Model must be a mongoengine Document schema')
field_args = field_args or {}
# Find properties
properties = sorted(((k, v) for k, v in iteritems(model._fields)),
key=lambda v: v[1].creation_counter)
if only:
props = dict(properties)
def find(name):
if extra_fields and name in extra_fields:
return FieldPlaceholder(extra_fields[name])
p = props.get(name)
if p is not None:
return p
raise ValueError('Invalid model property name %s.%s' % (model, name))
properties = ((p, find(p)) for p in only)
elif exclude:
properties = (p for p in properties if p[0] not in exclude)
# Create fields
field_dict = {}
for name, p in properties:
field = converter.convert(model, p, field_args.get(name))
if field is not None:
field_dict[name] = field
# Contribute extra fields
if not only and extra_fields:
for name, field in iteritems(extra_fields):
field_dict[name] = form.recreate_field(field)
field_dict['model_class'] = model
return type(model.__name__ + 'Form', (base_class,), field_dict)
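# Editorial usage sketch (not part of the original module): inside a
# flask-admin ModelView for a mongoengine Document the converter and form
# factory above are typically wired together roughly like this (the Article
# document and field names are hypothetical):
#
#     converter = CustomModelConverter(view)
#     ArticleForm = get_form(Article, converter,
#                            only=['title', 'body'],
#                            field_args={'title': {'label': 'Headline'}})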
|
|
import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from .utils import AttributeSetter
__all__ = [
'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
'FloatRangeField', 'DateTimeRangeField', 'DateRangeField',
]
class RangeField(models.Field):
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
# Initializing base_field here ensures that its model matches the model for self.
if hasattr(self, 'base_field'):
self.base_field = self.base_field()
super().__init__(*args, **kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
def get_prep_value(self, value):
if value is None:
return None
elif isinstance(value, Range):
return value
elif isinstance(value, (list, tuple)):
return self.range_type(value[0], value[1])
return value
def to_python(self, value):
if isinstance(value, str):
# Assume we're deserializing
vals = json.loads(value)
for end in ('lower', 'upper'):
if end in vals:
vals[end] = self.base_field.to_python(vals[end])
value = self.range_type(**vals)
elif isinstance(value, (list, tuple)):
value = self.range_type(value[0], value[1])
return value
def set_attributes_from_name(self, name):
super().set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def value_to_string(self, obj):
value = self.value_from_object(obj)
if value is None:
return None
if value.isempty:
return json.dumps({"empty": True})
base_field = self.base_field
result = {"bounds": value._bounds}
for end in ('lower', 'upper'):
val = getattr(value, end)
if val is None:
result[end] = None
else:
obj = AttributeSetter(base_field.attname, val)
result[end] = base_field.value_to_string(obj)
return json.dumps(result)
def formfield(self, **kwargs):
kwargs.setdefault('form_class', self.form_field)
return super().formfield(**kwargs)
class IntegerRangeField(RangeField):
base_field = models.IntegerField
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int4range'
class BigIntegerRangeField(RangeField):
base_field = models.BigIntegerField
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int8range'
class FloatRangeField(RangeField):
base_field = models.FloatField
range_type = NumericRange
form_field = forms.FloatRangeField
def db_type(self, connection):
return 'numrange'
class DateTimeRangeField(RangeField):
base_field = models.DateTimeField
range_type = DateTimeTZRange
form_field = forms.DateTimeRangeField
def db_type(self, connection):
return 'tstzrange'
class DateRangeField(RangeField):
base_field = models.DateField
range_type = DateRange
form_field = forms.DateRangeField
def db_type(self, connection):
return 'daterange'
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class RangeContainedBy(models.Lookup):
lookup_name = 'contained_by'
type_mapping = {
'integer': 'int4range',
'bigint': 'int8range',
'double precision': 'numrange',
'date': 'daterange',
'timestamp with time zone': 'tstzrange',
}
def as_sql(self, qn, connection):
field = self.lhs.output_field
if isinstance(field, models.FloatField):
sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
else:
sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return sql % (lhs, rhs), params
def get_prep_lookup(self):
return RangeField().get_prep_value(self.rhs)
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_lt'
operator = '<<'
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_gt'
operator = '>>'
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_lt'
operator = '&>'
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_gt'
operator = '&<'
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
lookup_name = 'adjacent_to'
operator = '-|-'
@RangeField.register_lookup
class RangeStartsWith(models.Transform):
lookup_name = 'startswith'
function = 'lower'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(models.Transform):
lookup_name = 'endswith'
function = 'upper'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(models.Transform):
lookup_name = 'isempty'
function = 'isempty'
output_field = models.BooleanField()
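# Editorial usage sketch (not part of the original module). With a hypothetical
# model such as
#
#     class Booking(models.Model):
#         period = DateRangeField()
#
# the lookups and transforms registered above become available as, e.g.:
#
#     Booking.objects.filter(period__contains=some_date)
#     Booking.objects.filter(period__overlap=DateRange(start, end))
#     Booking.objects.filter(period__startswith=some_date)  # lower bound
#     Booking.objects.filter(period__isempty=True)
#
# while the reverse `contained_by` lookup is registered on the plain scalar
# fields (Date, DateTime, Integer, BigInteger, Float) above.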
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import contextlib
import six
from .framework import Program, default_main_program, Variable
from . import core
from .executor import global_scope, Executor
from paddle.fluid.proto import data_feed_pb2
from google.protobuf import text_format
from . import io
from .data_feed_desc import DataFeedDesc
from .trainer_desc import TrainerDesc, MultiTrainer, DistMultiTrainer
from .distributed import ps_instance
from .contrib.utils import hdfs_utils as hdfs
__all__ = ['AsyncExecutor']
class AsyncExecutor(object):
"""
An asynchronous Executor in Python. Through exploiting the power of
multi-core processor and data queueing, AsyncExecutor makes data reading
and consuming decoupled, each running in multiple threads in parallel.
Instead of reading data on the Python side, AsyncExecutor accepts a training
file list, which will be retrieved in C++, then training inputs will be
read, parsed and fed to the training network within C++ code.
AsyncExecutor is in active development and the API might change in the near
future.
Example:
>>> data_feed = fluid.DataFeedDesc('data.proto')
>>> startup_program = fluid.default_startup_program()
>>> main_program = fluid.default_main_program()
>>> filelist = ["train_data/part-%d" % i for i in range(100)]
>>> thread_num = len(filelist) / 4
>>>
>>> place = fluid.CPUPlace()
>>> async_executor = fluid.AsyncExecutor(place)
>>>
>>> async_executor.run_startup_program(startup_program)
>>>
>>> epoch = 10
>>> for i in range(epoch):
>>> async_executor.run(main_program,
>>> data_feed,
>>> filelist,
>>> thread_num,
>>> [acc],
>>> debug=False)
Args:
place(fluid.CPUPlace|None): indicate the executor run on which device.
Only CPUPlace supported
Note:
For debugging a complicated network on parallel GPUs, you can test it
on the executor. They have exactly the same arguments, and are expected
to produce the same results.
Note: Only running on CPUPlace supported.
"""
def __init__(self, place=None, run_mode=""):
"""
Init.
Example:
>>> place = fluid.CPUPlace()
>>> async_executor = fluid.AsyncExecutor(place)
Args:
place(Place): CPUPlace only
run_mode(str): default is empty string.
"""
if place is None:
place = core.CPUPlace()
if not isinstance(place, core.CPUPlace):
raise ValueError("AsyncExecutor only supports CPU device")
p = core.Place()
p.set_place(place)
scope = global_scope()
self.executor = core.AsyncExecutor(scope, p)
self.instance = None
def run(self,
program,
data_feed,
filelist,
thread_num,
fetch,
mode="",
debug=False):
"""
Run program by this AsyncExecutor. Training dataset will be in filelist.
Users can also inspect certain variables by naming them in parameter
:code:`fetch`, like in fluid.Executor. Unlike fluid.Executor, however,
AsyncExecutor doesn't return fetched variables, instead, it will dump
the values of each fetched variable to standard output.
The dataset will be processed on multiple threads; within each thread a
local scope will be created, and all OPs are also created in that scope.
Parameters are updated by all the OPs simultaneously.
Args:
program(Program): the program that needs to run; if not provided,
then default_main_program will be used.
data_feed(DataFeedDesc): A DataFeedDesc object
filelist(str): a file containing the training dataset file list
thread_num(int): number of concurrent training threads. See
:code:`Note` for how to set this properly
fetch(str|list): the var name or a list of var names to inspect
mode(str): run mode of this interface
debug(bool): When set to True, fetch vars will be printed to
standard output after each minibatch
Note:
the executor will run all operators in the program, not only
the operators that the fetch_list depends on.
Note:
AsyncExecutor runs on multiple threads, each bound to a
CPU core. To achieve best performance, set the thread
num equal to or slightly less than the number of CPU cores.
"""
if program is None:
program = default_main_program()
program_desc = program.desc
if data_feed is None:
raise ValueError('ValueError: data_feed should be provided')
if filelist is None:
raise ValueError('ValueError: filelist should be provided')
if isinstance(filelist, str):
filelist = [filelist]
if not isinstance(thread_num, int):
raise TypeError('TypeError: thread_num should be a positive number')
if fetch is not None:
if isinstance(fetch, Variable):
fetch = [fetch]
fetch_var_names = [var.name for var in fetch]
for fetch_var in fetch:
shape = fetch_var.shape
if shape[len(shape) - 1] != 1:
raise AssertionError(
"%s: Fetch variable has wrong shape. Only varibles "
"with the last dimension size 1 supported." %
(fetch_var.name))
self.executor.run_from_files(program_desc,
data_feed.desc(), filelist, thread_num,
fetch_var_names, mode, debug,
str(id(program_desc)))
def download_data(self,
afs_path,
local_path,
fs_default_name,
ugi,
file_cnt,
hadoop_home="$HADOOP_HOME",
process_num=12):
"""
download_data is a default download method for distributed training
a user can also download data without this method
Example:
>>> exe = fluid.AsyncExecutor()
>>> exe.download_data("/xxx/xxx/xx/",
>>> "./data", "afs://
>>> xxx.xxx.xxx.xxx:9901", "xxx,yyy")
Args:
afs_path(str): afs_path defined by users
local_path(str): download data path
fs_default_name(str): file system server address
ugi(str): hadoop ugi
file_cnt(int): a user can specify file number for debugging
hadoop_home(str): hadoop home path
process_num(int): download process num
"""
if self.instance is None:
raise ValueError('instance is None, please run '
'config_distributed_nodes init instance')
configs = {"fs.default.name": fs_default_name, "hadoop.job.ugi": ugi}
client = hdfs.HDFSClient(hadoop_home, configs)
downloads = hdfs.multi_download(
client,
afs_path,
local_path,
self.instance.get_worker_index(),
self.instance.get_node_cnt() / 2,
multi_processes=process_num)
self.instance.barrier_worker() #wait for download_data
def get_instance(self):
"""
get current node's instance so that user can do operations
in distributed setting
"""
if self.instance is None:
raise ValueError(
'instance is None, please run config_distributed_nodes init instance'
)
return self.instance
def config_distributed_nodes(self):
"""
if a user needs to run distributed async executor
he or she needs to do a global configuration so that
information of current process can be obtained
"""
self.instance = ps_instance.PaddlePSInstance(1, 2)
return self.instance
def stop(self):
"""
at the end of the process, users should call stop to shut down servers
and barrier all workers
"""
if self.instance is None:
raise ValueError(
'instance is None, please run config_distributed_nodes init instance'
)
self.instance.barrier_worker() #worker do all things
if self.instance.is_first_worker():
self.executor.stop_server()
self.instance.barrier_worker() #sync
self.instance.barrier_all()
self.instance.finalize()
def init_server(self, dist_desc):
"""
Initialize server of current node if current process is a server.
Args:
dist_desc(str): a protobuf string that describes
how to init a worker and a server
"""
if self.instance is None:
raise ValueError(
'instance is None, please run config_distributed_nodes init instance'
)
self.dist_desc_str = text_format.MessageToString(dist_desc)
self.dist_desc = dist_desc
self.executor.init_server(self.dist_desc_str, self.instance._rankid)
ip = self.executor.start_server()
self.instance.set_ip(ip)
self.instance.barrier_all() #wait all server start
ips = self.instance.gather_ips()
self.executor.gather_servers(ips, self.instance.get_node_cnt())
self.instance.barrier_all() #wait all worker start
def init_worker(self, dist_desc, startup_program):
"""
Initialize worker of current node if current process is a worker.
Args:
dist_desc(str): a protobuf string that describes
how to init a worker and a server
startup_program(fluid.Program): startup program of current process
"""
if self.instance is None:
raise ValueError(
'instance is None, please run config_distributed_nodes init instance'
)
self.dist_desc_str = text_format.MessageToString(dist_desc)
self.dist_desc = dist_desc
place = core.CPUPlace()
executor = Executor(place)
if isinstance(startup_program, list):
for sp in startup_program:
executor.run(sp)
else:
executor.run(startup_program)
self.instance.barrier_all() #wait all server start
ips = self.instance.gather_ips()
self.executor.init_worker(self.dist_desc_str, ips,
self.instance.get_node_cnt(),
self.instance._rankid)
self.instance.barrier_all() #wait all worker start
if self.instance.is_first_worker():
self.executor.init_model()
self.instance.barrier_worker() #wait init model
def init_model(self):
"""
init_model command that can be invoked from one of the workers;
model parameters are initialized in servers
"""
if self.instance is None:
raise ValueError(
'instance is None, please run config_distributed_nodes init instance'
)
self.executor.init_model()
def save_model(self, save_path):
"""
save_model command that can be invoked from one of the workers;
model parameters are saved on servers and uploaded to save_path on the file system.
Args:
save_path(str): save path to file system
"""
if self.instance is None:
raise ValueError(
'instance is None, please run config_distributed_nodes init instance'
)
self.executor.save_model(save_path)
|
|
'''tzinfo timezone information for Australia/Lord_Howe.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Lord_Howe(DstTzInfo):
'''Australia/Lord_Howe timezone definition. See datetime.tzinfo for details'''
zone = 'Australia/Lord_Howe'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1981,2,28,14,0,0),
d(1981,10,24,15,30,0),
d(1982,3,6,14,30,0),
d(1982,10,30,15,30,0),
d(1983,3,5,14,30,0),
d(1983,10,29,15,30,0),
d(1984,3,3,14,30,0),
d(1984,10,27,15,30,0),
d(1985,3,2,14,30,0),
d(1985,10,26,15,30,0),
d(1986,3,15,15,0,0),
d(1986,10,18,15,30,0),
d(1987,3,14,15,0,0),
d(1987,10,24,15,30,0),
d(1988,3,19,15,0,0),
d(1988,10,29,15,30,0),
d(1989,3,18,15,0,0),
d(1989,10,28,15,30,0),
d(1990,3,3,15,0,0),
d(1990,10,27,15,30,0),
d(1991,3,2,15,0,0),
d(1991,10,26,15,30,0),
d(1992,2,29,15,0,0),
d(1992,10,24,15,30,0),
d(1993,3,6,15,0,0),
d(1993,10,30,15,30,0),
d(1994,3,5,15,0,0),
d(1994,10,29,15,30,0),
d(1995,3,4,15,0,0),
d(1995,10,28,15,30,0),
d(1996,3,30,15,0,0),
d(1996,10,26,15,30,0),
d(1997,3,29,15,0,0),
d(1997,10,25,15,30,0),
d(1998,3,28,15,0,0),
d(1998,10,24,15,30,0),
d(1999,3,27,15,0,0),
d(1999,10,30,15,30,0),
d(2000,3,25,15,0,0),
d(2000,8,26,15,30,0),
d(2001,3,24,15,0,0),
d(2001,10,27,15,30,0),
d(2002,3,30,15,0,0),
d(2002,10,26,15,30,0),
d(2003,3,29,15,0,0),
d(2003,10,25,15,30,0),
d(2004,3,27,15,0,0),
d(2004,10,30,15,30,0),
d(2005,3,26,15,0,0),
d(2005,10,29,15,30,0),
d(2006,4,1,15,0,0),
d(2006,10,28,15,30,0),
d(2007,3,24,15,0,0),
d(2007,10,27,15,30,0),
d(2008,3,29,15,0,0),
d(2008,10,25,15,30,0),
d(2009,3,28,15,0,0),
d(2009,10,24,15,30,0),
d(2010,3,27,15,0,0),
d(2010,10,30,15,30,0),
d(2011,3,26,15,0,0),
d(2011,10,29,15,30,0),
d(2012,3,24,15,0,0),
d(2012,10,27,15,30,0),
d(2013,3,30,15,0,0),
d(2013,10,26,15,30,0),
d(2014,3,29,15,0,0),
d(2014,10,25,15,30,0),
d(2015,3,28,15,0,0),
d(2015,10,24,15,30,0),
d(2016,3,26,15,0,0),
d(2016,10,29,15,30,0),
d(2017,3,25,15,0,0),
d(2017,10,28,15,30,0),
d(2018,3,24,15,0,0),
d(2018,10,27,15,30,0),
d(2019,3,30,15,0,0),
d(2019,10,26,15,30,0),
d(2020,3,28,15,0,0),
d(2020,10,24,15,30,0),
d(2021,3,27,15,0,0),
d(2021,10,30,15,30,0),
d(2022,3,26,15,0,0),
d(2022,10,29,15,30,0),
d(2023,3,25,15,0,0),
d(2023,10,28,15,30,0),
d(2024,3,30,15,0,0),
d(2024,10,26,15,30,0),
d(2025,3,29,15,0,0),
d(2025,10,25,15,30,0),
d(2026,3,28,15,0,0),
d(2026,10,24,15,30,0),
d(2027,3,27,15,0,0),
d(2027,10,30,15,30,0),
d(2028,3,25,15,0,0),
d(2028,10,28,15,30,0),
d(2029,3,24,15,0,0),
d(2029,10,27,15,30,0),
d(2030,3,30,15,0,0),
d(2030,10,26,15,30,0),
d(2031,3,29,15,0,0),
d(2031,10,25,15,30,0),
d(2032,3,27,15,0,0),
d(2032,10,30,15,30,0),
d(2033,3,26,15,0,0),
d(2033,10,29,15,30,0),
d(2034,3,25,15,0,0),
d(2034,10,28,15,30,0),
d(2035,3,24,15,0,0),
d(2035,10,27,15,30,0),
d(2036,3,29,15,0,0),
d(2036,10,25,15,30,0),
d(2037,3,28,15,0,0),
d(2037,10,24,15,30,0),
]
_transition_info = [
i(36000,0,'EST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
]
Lord_Howe = Lord_Howe()
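
# Illustrative demo appended for clarity (not part of the generated zoneinfo
# data): Lord Howe Island uses a 30-minute DST shift, so the UTC offset
# alternates between +10:30 (standard time) and +11:00 (daylight time).
if __name__ == '__main__':
    from datetime import datetime
    import pytz
    lh = pytz.timezone('Australia/Lord_Howe')
    print(lh.localize(datetime(2019, 7, 1)).utcoffset())  # 10:30:00 (austral winter)
    print(lh.localize(datetime(2019, 1, 1)).utcoffset())  # 11:00:00 (austral summer)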
|
|
# -*- coding: utf8 -*-
__author__ = 'sergey'
from dedupsqlfs.db.mysql.table import Table
class TableTree( Table ):
_table_name = "tree"
def create( self ):
cur = self.getCursor()
# Create table
cur.execute(
"CREATE TABLE IF NOT EXISTS `%s` (" % self._table_name+
"`id` BIGINT UNSIGNED PRIMARY KEY AUTO_INCREMENT, "+
"`parent_id` BIGINT UNSIGNED, "+
"`name_id` BIGINT UNSIGNED NOT NULL, "+
"`inode_id` BIGINT UNSIGNED NOT NULL"+
")"+
self._getCreationAppendString()
)
self.createIndexIfNotExists("pn", ('parent_id', 'name_id',), unique=True)
self.createIndexIfNotExists("inode", ('inode_id',))
self.createIndexIfNotExists("parent_id", ('parent_id', 'id'))
return
def getRowSize(self):
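        # 4 BIGINT UNSIGNED columns (id, parent_id, name_id, inode_id) x 8 bytes each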
return 4 * 8
def insert( self, parent_id, name_id, inode_id ):
"""
:param parent_id: int|None
:param name_id: int
:param inode_id: int
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"INSERT INTO `%s` " % self.getName()+
" (`parent_id`, `name_id`, `inode_id`) "+
"VALUES (%(parent)s, %(name)s, %(inode)s)",
{
"parent": parent_id,
"name": name_id,
"inode": inode_id
}
)
item = cur.lastrowid
self.stopTimer('insert')
return item
def rename_inode( self, node_id, new_parent_id, new_name_id):
"""
:param parent_id: int|None
:param name_id: int
:param inode_id: int
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute("UPDATE `%s` " % self.getName()+
"SET parent_id=%(parent)s, name_id=%(name)s WHERE id=%(id)s",
{
"parent": new_parent_id,
"name": new_name_id,
"id": node_id
})
        item = cur.rowcount  # rows updated; lastrowid is not meaningful after UPDATE
self.stopTimer('rename_inode')
return item
def delete(self, node_id):
self.startTimer()
cur = self.getCursor()
cur.execute(
"DELETE FROM `%s` " % self.getName()+
" WHERE `id`=%(id)s",
{
"id": node_id
}
)
item = cur.rowcount
self.stopTimer('delete')
return item
def find_by_parent_name(self, parent_id, name_id):
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT `id`, `inode_id`, `name_id` FROM `%s` " % self.getName()+
" WHERE `parent_id`=%(parent)s AND `name_id`=%(name)s",
{
"parent": parent_id,
"name": name_id
}
)
item = cur.fetchone()
self.stopTimer('find_by_parent_name')
return item
def find_by_inode(self, inode_id):
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT * FROM `%s` " % self.getName()+
" WHERE `inode_id`=%(inode)s",
{
"inode": inode_id
}
)
item = cur.fetchone()
self.stopTimer('find_by_inode')
return item
def get(self, node_id):
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT * FROM `%s` " % self.getName()+
" WHERE `id`=%(node)s",
{
"node": node_id
}
)
item = cur.fetchone()
cur.close()
self.stopTimer('get')
return item
def get_children_inodes(self, parent_id):
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT `inode_id` FROM `%s` " % self.getName()+
" WHERE `parent_id`=%(parent)s ORDER BY `id` ASC",
{
"parent": parent_id
}
)
_items = cur.fetchall()
items = ("%i" % _i["inode_id"] for _i in _items)
self.stopTimer('get_children_inodes')
return items
def get_children(self, parent_id, offset=0):
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT `id`,`name_id`,`inode_id` FROM `%s` " % self.getName()+
" WHERE `parent_id`=%(parent)s AND `id`>%(offset)s ORDER BY `id` ASC",
{
"parent": parent_id,
"offset": offset
}
)
items = (item for item in cur)
self.stopTimer('get_children')
return items
def get_inodes_by_inodes(self, inode_ids):
self.startTimer()
iids = ()
id_str = ",".join(inode_ids)
if id_str:
cur = self.getCursor()
cur.execute("SELECT `inode_id` FROM `%s` " % self.getName()+
" WHERE `inode_id` IN (%s)" % (id_str,))
iids = set(str(item["inode_id"]) for item in cur)
self.stopTimer('get_inodes_by_inodes')
return iids
def get_inodes_by_inodes_intgen(self, id_str):
self.startTimer()
iids = ()
if id_str:
cur = self.getCursor()
cur.execute("SELECT `inode_id` FROM `%s` " % self.getName()+
" WHERE `inode_id` IN (%s)" % (id_str,))
iids = (item["inode_id"] for item in cur)
self.stopTimer('get_inodes_by_inodes_intgen')
return iids
def get_all_inodes_set(self):
self.startTimer()
cur = self.getCursor()
cur.execute("SELECT `inode_id` FROM `%s` " % self.getName())
iids = set(item["inode_id"] for item in cur)
self.stopTimer('get_all_inodes_set')
return iids
pass
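
# A hedged usage sketch (comments only; names are illustrative and assume a
# configured MySQL connection owned by the dedupsqlfs manager):
#
#   tree = TableTree(manager)
#   tree.create()
#   node_id = tree.insert(parent_id=1, name_id=42, inode_id=7)
#   row = tree.find_by_parent_name(1, 42)   # -> row with id/inode_id/name_id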
|
|
from scipy.integrate import trapz
import numpy as np
import matplotlib.pyplot as plt
class Results:
"""Results of a simulation of a single particle cluster
The results contain the time-varying magnetisation and field resulting from
stochastic simulation of a particle cluster consisting of `N` particles.
Args:
time (np.ndarray): 1d array of length `M`. Time in seconds for
each sample in the results
field (np.ndarray): 1d array of length `M`. Field amplitude at each point
in time. Field is always applied along the z-axis.
        x (dict): `{0: np.ndarray, ..., N-1: np.ndarray}` key, value pair is an integer
particle id and a corresponding 1d array of length `M` for each of the
`N` particles in the cluster. 1d array is the x-coordinate of the particle
magnetisation vector at each point in time.
        y (dict): `{0: np.ndarray, ..., N-1: np.ndarray}` key, value pair is an integer
particle id and a corresponding 1d array of length `M` for each of the
`N` particles in the cluster. 1d array is the y-coordinate of the particle
magnetisation vector at each point in time.
        z (dict): `{0: np.ndarray, ..., N-1: np.ndarray}` key, value pair is an integer
particle id and a corresponding 1d array of length `M` for each of the
`N` particles in the cluster. 1d array is the z-coordinate of the particle
magnetisation vector at each point in time.
N (int): number of particles in the ensemble
"""
def __init__(self, time, field, x, y, z, N):
self.time = time
self.field = field
self.x = x
self.y = y
self.z = z
self.N = N
def plot(self):
"""Plots the magnetisation from the results
Plots the x,y,z coordinates of the magnetisation vector for every particle
in the particle cluster.
Returns:
matplotlib figure handle containing the resulting plot axes.
"""
fg, axs = plt.subplots(nrows=self.N)
if self.N==1:
axs = [axs]
for idx in range(self.N):
axs[idx].plot(self.time, self.x[idx], label='x')
axs[idx].plot(self.time, self.y[idx], label='y')
axs[idx].plot(self.time, self.z[idx], label='z')
axs[idx].legend()
axs[idx].set_title('Particle {}'.format(idx))
axs[idx].set_xlabel('Reduced time [dimless]')
fg.tight_layout()
return fg
def magnetisation(self, direction='z'):
"""Computes the total magnetisation of the cluster
Computes the total time-varying magnetisation of the cluster in a desired
direction. The total magnetisation is simply the sum of the individual
magnetisation vector components in the specified direction (x,y, or z).
Args:
direction (str, optional): the direction of magnetisation `x`, `y` or `z`.
Default value is `z`.
Returns:
            np.ndarray: 1d array of length `M` containing the total magnetisation at each point
in `self.time`.
"""
return np.sum([vals for vals in getattr(self, direction).values()], axis=0)
def final_state(self):
"""The state of the cluster at the end of the simulation.
Returns the state of the particle cluster at the end of the simulation time.
Returns:
dict: a nested dictionary `{'x': {0: m_x, ..., N-1: m_x}, 'y': ...}`
containing the final value of the magnetisation vector for each
of the `N` particles in the cluster.
"""
return {
'x': {k:v[-1] for k,v in self.x.items()},
'y': {k:v[-1] for k,v in self.y.items()},
'z': {k:v[-1] for k,v in self.z.items()}
}
class EnsembleResults:
"""Results from a simulation of an ensemble of particle clusters
The EnsembleResults object holds the resulting `magpy.Results` objects for
an ensemble of simulated particle clusters. It provides a user-friendly
alternative to handling a large collection of `magpy.Results` instances and
    implements methods for computing ensemble-wide properties.
Args:
results (list[magpy.Results]): results for each particle cluster in the ensemble
Attributes:
time: (np.ndarray): 1d array of length `M`. Time in seconds for each sample
in the ensemble results.
field: (np.ndarray): 1d array of length `M`. Field amplitude at each point in time.
Field is always applied along the z-axis.
"""
def __init__(self, results):
self.results = results
self.time = results[0].time
self.field = results[0].field
def magnetisation(self, direction='z'):
"""Total magnetisation of each member of the ensemble
        The total magnetisation of a cluster is computed by summing the components of
        the magnetisation vector for each particle in the cluster. The component (`x`,`y`,`z`)
        along which the magnetisation is computed may be specified. The default value is `z`,
        which is the same direction as the applied magnetic field.
Args:
direction (str, optional): direction of magnetisation `x`, `y` or `z`.
Default value is `z`.
Returns:
            list[np.ndarray]: a list with one length-`M` 1d array per particle
                cluster, giving the total magnetisation of that cluster.
"""
return [res.magnetisation(direction) for res in self.results]
def ensemble_magnetisation(self, direction='z'):
"""Total magnetisation of entire ensemble
        The total magnetisation of an ensemble of particle clusters. The ensemble
        magnetisation is the average value of the magnetisation of each particle
        cluster in the ensemble at each point in time. The component (`x`,`y`,`z`)
        along which the magnetisation is computed may be specified. The default value is `z`,
        which is the same direction as the applied magnetic field.
Args:
direction (str, optional): direction of magnetisation `x`, `y` or `z`.
Default value is `z`.
Returns:
np.ndarray: 1d array of length `M` containing the ensemble magnetisation
for each point in `self.time`
"""
return np.sum(self.magnetisation(direction), axis=0) / len(self.results)
def final_state(self):
"""State of each ensemble member at the end of the simulation.
The final state of each particle cluster in the ensemble at the end
of the simulation time. The state of each particle cluster is the value
of magnetisation vector of every particle in the cluster.
Returns:
list[dict]: a list of nested dictionaries like `{'x': {0: m_x, ..., N-1: m_x}, 'y': ...}`.
The dictionaries contain the final value of the magnetisation vector for
each of the `N` particles in the cluster.
"""
return [res.final_state() for res in self.results]
def energy_dissipated(self, start_time=None, end_time=None):
"""Total energy dissipated by the ensemble.
A simulation with a constant or zero applied field will
dissipate no energy. The energy dissipated by an ensemble of
magnetic particle clusters subjected to an alternating field
is the area of the hysteresis loop (magnetisation-field
plane).
        The energy dissipated may be computed for the entire simulation
        or within a specific time window, defined by `start_time` and `end_time`.
Args:
start_time (double, optional): the start of the time window for computing energy dissipated.
Default value `None` uses the start of the simulation.
end_time (double, optional): the end of the time window for computing energy dissipated.
Default value `None` uses the end of the simulation.
Returns:
double: total energy dissipated by the ensemble during the time window
"""
        before_mask = (self.time >= start_time) if start_time is not None else np.ones_like(self.time, dtype=bool)
        after_mask = (self.time <= end_time) if end_time is not None else np.ones_like(self.time, dtype=bool)
        mask = before_mask & after_mask
return -get_mu0() * trapz(self.field[mask], self.ensemble_magnetisation()[mask])
def final_cycle_energy_dissipated(self, field_frequency):
"""Energy dissipated by the final cycle of the magnetic field.
A simulation with a constant or zero applied field will
dissipate no energy. The energy dissipated by an ensemble of
magnetic particle clusters subjected to an alternating field
is the area of the hysteresis loop (magnetisation-field
plane).
Use this function to compute the energy dissipated by the final
cycle (i.e. period) of the applied alternating magnetic field
if the total simulation time contains multiple cycles of the field
(i.e. is longer than the period of the applied field). A common
        use case for this is to simulate a large number of field cycles to
reach equilibrium and then compute the energy dissipated during a
single cycle of the field in equilibrium.
Args:
field_frequency (double): the frequency of the applied magnetic field
Returns:
double: energy dissipated during the last cycle of the applied magnetic field.
"""
T = 1./field_frequency
return self.energy_dissipated(start_time=self.time[-1] - T)
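
# A minimal, self-contained sketch with synthetic data (not magpy output)
# showing how the Results/EnsembleResults API above is intended to be used:
if __name__ == '__main__':
    time = np.linspace(0, 1, 500)
    field = 0.5 * np.sin(2 * np.pi * 5 * time)   # toy 5 Hz drive field
    zeros = {0: np.zeros_like(time)}
    m = {0: np.tanh(10 * field)}                 # toy magnetisation response
    res = Results(time, field, x=zeros, y=zeros, z=m, N=1)
    ens = EnsembleResults([res, res])
    print(ens.ensemble_magnetisation()[:5])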
|
|
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
"""
Application Main Menu Layout
"""
@staticmethod
def layout(item):
""" Custom Layout Method """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized:
item.enabled = False
item.visible = False
elif item.enabled is None or item.enabled:
item.enabled = True
item.visible = True
if item.enabled and item.visible:
if item.parent is None:
# The main menu
items = item.render_components()
return UL(items, _id="nav")
else:
if item.components:
# A submenu
items = item.render_components()
_class = item.selected and "highlight" or ""
return LI(A(item.label,
_href=item.url(),
_id=item.attr._id,
_class=_class),
UL(items, _class="sub-menu"))
else:
# A menu item
return LI(A(item.label,
_href=item.url(),
_id=item.attr._id))
else:
return None
# =============================================================================
class S3OptionsMenuLayout(S3NavigationItem):
"""
Controller Options Menu Layout
"""
@staticmethod
def layout(item):
""" Custom Layout Method """
        # Manage flags: hide any disabled/unauthorized items
        # (initialise both flags so they are always bound)
        enabled = False
        visible = False
        if item.authorized and (item.enabled is None or item.enabled):
            enabled = True
            visible = True
if enabled and visible:
if item.parent is not None:
if item.enabled and item.authorized:
if item.components:
# Submenu
_class = ""
if item.parent.parent is None and item.selected:
_class = "highlight"
items = item.render_components()
if items:
items = LI(UL(items, _class="menu-extention"))
return [LI(A(item.label,
_href=item.url(),
_id=item.attr._id,
_class=_class)), items]
else:
# Submenu item
if item.parent.parent is None:
_class = item.selected and "highlight" or ""
else:
_class = " "
return LI(A(item.label,
_href=item.url(),
_id=item.attr._id,
_class=_class))
else:
# Main menu
items = item.render_components()
return UL(items, _id="main-sub-menu", _class="sub-menu")
else:
return None
# =============================================================================
class S3LanguageMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
""" Language menu layout
options for each entry:
- lang_code: the language code
- lang_name: the language name
            options for the menu:
- current_language: code of the current language
"""
if item.enabled:
if item.components:
# The language menu itself
current_language = current.T.accepted_language
items = item.render_components()
select = SELECT(items, value=current_language,
_name="_language",
# @ToDo T:
_title="Language Selection",
_onchange="S3.reloadWithQueryStringVars({'_language':$(this).val()});")
form = FORM(select, _id="language_selector",
_name="_language",
_action="",
_method="get")
return form
else:
# A language entry
return OPTION(item.opts.lang_name,
_value=item.opts.lang_code)
else:
return None
# -------------------------------------------------------------------------
def check_enabled(self):
""" Check whether the language menu is enabled """
if current.deployment_settings.get_L10n_display_toolbar():
return True
else:
return False
# -----------------------------------------------------------------------------
# Shortcut
ML = S3LanguageMenuLayout
# =============================================================================
class S3PersonalMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return DIV(UL(items), _class="pmenu-wrapper")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return LI(A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MP = S3PersonalMenuLayout
# =============================================================================
class S3DashBoardMenuLayout(S3NavigationItem):
""" Layout for the bottom-menu (dashboard menu) """
@staticmethod
def layout(item):
T = current.T
if item.components:
items = item.render_components()
else:
items = None
if item.parent is None:
#return items
#elif item.parent.parent is None:
if items:
if item.attr._id is not None:
_id = item.attr._id
else:
_id = "sub-dashboard"
return UL(items, _id=_id)
else:
return ""
else:
if item.components:
return LI(A(H2(item.label),
UL(items),
IMG(_src=URL(c="static", f="themes",
args=["IFRC", "img", item.opts.image]),
_alt=T(item.opts.title)),
_href=item.url()))
elif item.opts.text:
return LI(A(H2(item.label),
P(T(item.opts.text)),
IMG(_src=URL(c="static", f="themes",
args=["IFRC", "img", item.opts.image]),
_alt=item.opts.image),
_href=item.url()))
else:
return LI(A(item.label, _href=item.url()))
# -----------------------------------------------------------------------------
# Shortcut
DB = S3DashBoardMenuLayout
# =============================================================================
class S3OrgMenuLayout(S3NavigationItem):
""" Layout for the organisation-specific menu """
@staticmethod
def layout(item):
name = "IFRC"
logo = None
# Lookup Root Organisation name & Logo
root_org = current.auth.root_org()
if root_org:
s3db = current.s3db
table = s3db.org_organisation
record = current.db(table.id == root_org).select(table.name,
table.acronym,
table.logo,
limitby = (0, 1),
cache = s3db.cache,
).first()
if record:
if record.acronym:
name = _name = record.acronym
else:
_name = record.name
names = _name.split(" ")
names_with_breaks = []
nappend = names_with_breaks.append
for name in names:
nappend(name)
nappend(BR())
# Remove last BR()
names_with_breaks.pop()
name = TAG[""](*names_with_breaks)
if record.logo:
size = (60, None)
image = s3db.pr_image_library_represent(record.logo, size=size)
url_small = URL(c="default", f="download", args=image)
alt = "%s logo" % _name
logo = IMG(_src=url_small,
_alt=alt,
_width=60,
)
if not logo:
# Default to generic IFRC
logo = IMG(_src="/%s/static/themes/IFRC/img/dummy_flag.png" %
current.request.application,
_alt=current.T("Red Cross/Red Crescent"),
_width=60,
)
# Note: render using current.menu.org.render()[0] + current.menu.org.render()[1]
return (name, logo)
# -----------------------------------------------------------------------------
# Shortcut
OM = S3OrgMenuLayout
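# A hedged usage sketch (Sahana Eden convention; names below are illustrative,
# not defined in this file): layouts are consumed by declaring menu trees with
# S3NavigationItem subclasses and assigning them to current.menu, e.g.
#
#   M = S3OptionsMenuLayout
#   current.menu.options = M()(M("List", f="resource"), M("Create", f="create"))
#
# which then renders through S3OptionsMenuLayout.layout() above.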
# END =========================================================================
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for as_string_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class AsStringOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testFloat(self):
float_inputs_ = [
0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
]
with self.cached_session():
for dtype in (dtypes.float32, dtypes.float64):
input_ = array_ops.placeholder(dtype)
output = string_ops.as_string(input_, shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
s = lambda strs: [x.decode("ascii") for x in strs]
self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])
output = string_ops.as_string(input_, scientific=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])
output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])
output = string_ops.as_string(input_, width=3, fill="0")
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])
output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])
output = string_ops.as_string(input_, precision=10, width=3)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])
output = string_ops.as_string(
input_, precision=10, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])
with self.assertRaisesOpError("Cannot select both"):
output = string_ops.as_string(input_, scientific=True, shortest=True)
output.eval(feed_dict={input_: float_inputs_})
with self.assertRaisesOpError("Fill string must be one or fewer"):
output = string_ops.as_string(input_, fill="ab")
output.eval(feed_dict={input_: float_inputs_})
@test_util.run_deprecated_v1
def testInt(self):
# Cannot use values outside -128..127 for test, because we're also
# testing int8
int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
input_ = array_ops.placeholder(dtype)
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])
output = string_ops.as_string(input_, width=3, fill="0")
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])
with self.assertRaisesOpError("scientific and shortest"):
output = string_ops.as_string(input_, scientific=True)
output.eval(feed_dict={input_: int_inputs_})
with self.assertRaisesOpError("scientific and shortest"):
output = string_ops.as_string(input_, shortest=True)
output.eval(feed_dict={input_: int_inputs_})
with self.assertRaisesOpError("precision not supported"):
output = string_ops.as_string(input_, precision=0)
output.eval(feed_dict={input_: int_inputs_})
@test_util.run_deprecated_v1
def testLargeInt(self):
    # Exercise the int32/int64 extremes; these fall outside the int8-safe
    # -128..127 range used in testInt above.
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
input_ = array_ops.placeholder(dtypes.int32)
int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
input_ = array_ops.placeholder(dtypes.int64)
int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
@test_util.run_deprecated_v1
def testHalfInt(self):
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
input_ = array_ops.placeholder(dtypes.int16)
int_inputs_ = [np.iinfo(np.int16).min, np.iinfo(np.int16).max]
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
@test_util.run_deprecated_v1
def testBool(self):
bool_inputs_ = [False, True]
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
for dtype in (dtypes.bool,):
input_ = array_ops.placeholder(dtype)
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: bool_inputs_})
self.assertAllEqual(s(result), ["false", "true"])
@test_util.run_deprecated_v1
def testComplex(self):
float_inputs_ = [
0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
complex("-INF")
]
complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]
with self.cached_session():
for dtype in (dtypes.complex64, dtypes.complex128):
input_ = array_ops.placeholder(dtype)
def clean_nans(s_l):
return [s.decode("ascii").replace("-nan", "nan") for s in s_l]
output = string_ops.as_string(input_, shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, scientific=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, precision=10, width=3)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(
input_, precision=10, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])
with self.assertRaisesOpError("Cannot select both"):
output = string_ops.as_string(input_, scientific=True, shortest=True)
output.eval(feed_dict={input_: complex_inputs_})
if __name__ == "__main__":
test.main()
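
# A hedged aside (not part of this test file): in TF 2.x eager mode the same
# op is exposed as tf.strings.as_string, e.g.
#
#   import tensorflow as tf
#   tf.strings.as_string([0.5, 0.25], precision=2)  # -> [b'0.50', b'0.25']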
|
|
#!/usr/bin/python
#################################################################################################
#################################################################################################
#
# To create a schema with 1 million subscribers, first create the schema and 1M subscribers:
#
# $ ./filldb.py --subscribers=1000000 --max-inserts 50000 | mysql -u root --password=password
#
# **************
# ** NOTE **
# **************
#
# If you receive either of the following errors:
# IOError: [Errno 32] Broken pipe
# or
# ERROR 2006 (HY000) at line 7406: MySQL server has gone away
# Try increasing the value of the [mysqld] / max_allowed_packet entry
# in /etc/mysql/my.cnf and restart mysql
# or executing this on the server 'set global max_allowed_packet=1073741824;'
#
#################################################################################################
#################################################################################################
import sys
import argparse
import calendar
import datetime
import random
from faker import Faker
fake = Faker()
fake_IN = Faker('hi_IN')
parser = argparse.ArgumentParser()
parser.add_argument("--file-name", help="the file to write output to", default="nms.sql", dest='file_name')
parser.add_argument("--database", help="database name", default="nms")
parser.add_argument("--subscribers", help="number of subscribers", default=1000, type=int)
parser.add_argument("--channels", help="number of channels", default=2, type=int)
parser.add_argument("--subscription_packs", help="number of subscription packs", default=2, type=int)
parser.add_argument("--operators", help="number of operators", default=6, type=int)
parser.add_argument("--campaigns", help="number of campaigns", default=2, type=int)
parser.add_argument("--hours", help="number of hours", default=24, type=int)
parser.add_argument("--minutes", help="number of minutes", default=60, type=int)
parser.add_argument("--states", help="number of states", default=40, type=int)
parser.add_argument("--districts", help="number of districts", default=10, type=int)
parser.add_argument("--blocks", help="number of blocks", default=10, type=int)
parser.add_argument("--years", help="number of years", default=4, type=int)
parser.add_argument("--max-inserts", help="max number of items to insert at a time", default=1000, type=int, dest='max_inserts')
args = parser.parse_args()
if __name__ == '__main__':
max_time_dimension_id = 0
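    # Output is written to stdout so it can be piped straight into the mysql
    # client as shown in the header above (the --file-name option is parsed
    # but not used by this script).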
f = sys.stdout
f.write("drop schema if exists {};\n".format(args.database))
f.write("create schema {};\n".format(args.database))
f.write("use {};\n".format(args.database))
f.write("""
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_AUTOCOMMIT=@@AUTOCOMMIT, AUTOCOMMIT=0 */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
CREATE TABLE `time_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`day` SMALLINT,
`week` TINYINT,
`month` TINYINT,
`year` SMALLINT,
`date` DATE,
PRIMARY KEY (`id`),
INDEX `ymd` (`year` ASC, `month` ASC, `day` ASC),
UNIQUE INDEX `dwmy` (`day` ASC, `week` ASC, `month` ASC, `year` ASC)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `channel_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`channel` VARCHAR(255),
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `subscription_pack_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`subscription_pack` VARCHAR(255),
`subscription_pack_alias` VARCHAR(255),
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `operator_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`operator` VARCHAR(255) NOT NULL,
`start_pulse` INT,
`end_pulse` INT,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `location_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`district` VARCHAR(255) NOT NULL,
`block` VARCHAR(255) NOT NULL,
`panchayat` VARCHAR(255) NOT NULL,
`status` VARCHAR(36),
`last_modified_time` TIMESTAMP,
`alternate_location` VARCHAR(10),
`state` VARCHAR(255) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `sdbp` (`state` ASC, `district` ASC, `block` ASC, `panchayat` ASC)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `hour_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`hour_of_day` INT,
`minute_of_hour` INT,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `campaign_dimension` (
`id` INT NOT NULL AUTO_INCREMENT,
`campaign_id` VARCHAR(45),
`obd_message_duration` INT,
`inbox_message_duration` INT,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `subscribers` (
`id` INT NOT NULL AUTO_INCREMENT,
`name` VARCHAR(255),
`age_of_beneficiary` INT,
`estimated_date_of_delivery` DATE,
`channel_id` INT,
`location_id` INT,
`time_id` INT,
`operator_id` INT,
`start_week_number` INT,
`last_modified_time` TIMESTAMP,
PRIMARY KEY (`id`),
INDEX `subscribers_location_id_idx` (`location_id` ASC),
CONSTRAINT `fk_subscribers_channel_dimension`
FOREIGN KEY (`channel_id`)
REFERENCES `channel_dimension` (`id`),
CONSTRAINT `fk_subscribers_location_dimension`
FOREIGN KEY (`location_id`)
REFERENCES `location_dimension` (`id`),
CONSTRAINT `fk_subscribers_time_dimension`
FOREIGN KEY (`time_id`)
REFERENCES `time_dimension` (`id`),
CONSTRAINT `fk_subscribers_operator_dimension`
FOREIGN KEY (`operator_id`)
REFERENCES `operator_dimension` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `subscriptions` (
`id` INT NOT NULL AUTO_INCREMENT,
`subscriber_id` INT NOT NULL,
`subscription_pack_id` INT,
`channel_id` INT NOT NULL,
`operator_id` INT,
`time_id` INT NOT NULL,
`subscription_id` varchar(255),
`last_modified_time` timestamp,
`subscription_status` varchar(255),
`start_date` timestamp,
`old_subscription_id` INT,
`msisdn` varchar(15) NOT NULL,
`last_scheduled_message_date` timestamp,
`message_campaign_pack` varchar(255),
`referred_by_flw_msisdn` varchar(10),
`referred_by_flag` bit(1) DEFAULT b'0',
PRIMARY KEY (`id`),
INDEX `msisdn` (`msisdn`),
CONSTRAINT `fk_subscriptions_subscribers`
FOREIGN KEY (`subscriber_id`)
REFERENCES `subscribers` (`id`),
CONSTRAINT `fk_subscriptions_subscription_pack_dimension`
FOREIGN KEY (`subscription_pack_id`)
REFERENCES `subscription_pack_dimension` (`id`),
CONSTRAINT `fk_subscriptions_channel_dimension`
FOREIGN KEY (`channel_id`)
REFERENCES `channel_dimension` (`id`),
CONSTRAINT `fk_subscriptions_operator_dimension`
FOREIGN KEY (`operator_id`)
REFERENCES `operator_dimension` (`id`),
CONSTRAINT `fk_subscriptions_time_dimension`
FOREIGN KEY (`time_id`)
REFERENCES `time_dimension` (`id`),
CONSTRAINT `fk_subscriptions_old_subscription`
FOREIGN KEY (`old_subscription_id`)
REFERENCES `subscriptions` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `subscription_status_measure` (
`id` INT NOT NULL AUTO_INCREMENT,
`subscription_id` INT NOT NULL,
`status` VARCHAR(255),
`week_number` INT,
`channel_id` INT,
`operator_id` INT,
`subscription_pack_id` INT,
`remarks` VARCHAR(255),
`grace_count` INT,
`time_id` INT,
`hour_id` INT,
`last_modified_time` TIMESTAMP,
`mode` VARCHAR(255),
PRIMARY KEY (`id`),
INDEX `lastmodifiedtime_subscriptionid` (`last_modified_time` ASC, `subscription_id` ASC),
INDEX `subscriptionid_status` (`subscription_id` ASC, `status` ASC),
CONSTRAINT `subscription_status_measure_subscription_id_fkey`
FOREIGN KEY (`subscription_id`)
REFERENCES `subscriptions` (`id`),
CONSTRAINT `fk_subscription_status_measure_channel_dimension`
FOREIGN KEY (`channel_id`)
REFERENCES `channel_dimension` (`id`),
CONSTRAINT `fk_subscription_status_measure_operator_dimension`
FOREIGN KEY (`operator_id`)
REFERENCES `operator_dimension` (`id`),
CONSTRAINT `fk_subscription_status_measure_subscription_pack_dimension`
FOREIGN KEY (`subscription_pack_id`)
REFERENCES `subscription_pack_dimension` (`id`),
CONSTRAINT `fk_subscription_status_measure_time_dimension`
FOREIGN KEY (`time_id`)
REFERENCES `time_dimension` (`id`),
CONSTRAINT `fk_subscription_status_measure_hour_dimension`
FOREIGN KEY (`hour_id`)
REFERENCES `hour_dimension` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE `subscriber_call_measure` (
`id` INT NOT NULL AUTO_INCREMENT,
`subscription_id` INT NOT NULL,
`call_status` VARCHAR(255),
`duration` INT,
`operator_id` INT,
`subscription_pack_id` INT,
`service_option` VARCHAR(255),
`percentage_listened` TINYINT,
`campaign_id` INT,
`start_date` INT,
`end_date` INT,
`start_time` INT,
`end_time` INT,
`call_source` VARCHAR(255),
`subscription_status` VARCHAR(255),
`duration_in_pulse` INT,
PRIMARY KEY (`id`),
CONSTRAINT `fk_subscriber_call_measure_operator_dimension`
FOREIGN KEY (`operator_id`)
REFERENCES `operator_dimension` (`id`),
CONSTRAINT `fk_subscriber_call_measure_subscription_pack_dimension`
FOREIGN KEY (`subscription_pack_id`)
REFERENCES `subscription_pack_dimension` (`id`),
CONSTRAINT `fk_subscriber_call_measure_campaign_dimension`
FOREIGN KEY (`campaign_id`)
REFERENCES `campaign_dimension` (`id`),
CONSTRAINT `subscriber_call_measure_start_date_fkey`
FOREIGN KEY (`start_date`)
REFERENCES `time_dimension` (`id`),
CONSTRAINT `subscriber_call_measure_start_time_fkey`
FOREIGN KEY (`start_time`)
REFERENCES `hour_dimension` (`id`),
CONSTRAINT `subscriber_call_measure_end_date_fkey`
FOREIGN KEY (`end_date`)
REFERENCES `time_dimension` (`id`),
CONSTRAINT `subscriber_call_measure_end_time_fkey`
FOREIGN KEY (`end_time`)
REFERENCES `hour_dimension` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
""")
f.write('LOCK TABLES time_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `time_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO time_dimension (day, week, month, year, date) VALUES\n')
sep = ","
this_year = datetime.date.today().year
for year in range(this_year, this_year+args.years+1):
day = 1
week = 1
for month in range(1, 13):
last_day_of_month = calendar.monthrange(year, month)[1]
for day_of_month in range(1, last_day_of_month+1):
max_time_dimension_id += 1
if year == this_year+args.years and month == 12 and day_of_month == last_day_of_month:
sep = ";"
f.write('({},{},{},{},"{}-{}-{}"){}\n'.format(day, week, month, year, year, month, day_of_month, sep))
day += 1
if day % 7 == 0:
week += 1
f.write('/*!40000 ALTER TABLE `time_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('LOCK TABLES campaign_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `campaign_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO campaign_dimension (campaign_id) VALUES\n')
for campaign in range(1, args.campaigns+1):
f.write('("campaign{}"){}\n'.format(campaign, ';' if campaign == args.campaigns else ','))
f.write('/*!40000 ALTER TABLE `campaign_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('LOCK TABLES channel_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `channel_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO channel_dimension (channel) VALUES\n')
for channel in range(1, args.channels+1):
f.write('("channel{}"){}\n'.format(channel, ';' if channel == args.channels else ','))
f.write('/*!40000 ALTER TABLE `channel_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('LOCK TABLES hour_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `hour_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO hour_dimension (hour_of_day, minute_of_hour) VALUES\n')
count_hours = args.hours * args.minutes
count = 1
for hour in range(0, args.hours):
for minute in range(0, args.minutes):
f.write('({},{}){}\n'.format(hour, minute, ';' if count == count_hours else ','))
count += 1
f.write('/*!40000 ALTER TABLE `hour_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('LOCK TABLES location_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `location_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO location_dimension (state, district, block, panchayat) VALUES\n')
count = 1
count_dimensions = args.states*args.districts*args.blocks
for state in range(1, args.states+1):
for district in range(1, args.districts+1):
for block in range(1, args.blocks+1):
f.write('("state{}","district{}","block{}","panchayat{}"){}\n'.format(state, district, block, 1, ';' if count == count_dimensions else ','))
count += 1
f.write('/*!40000 ALTER TABLE `location_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('LOCK TABLES operator_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `operator_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO operator_dimension (operator) VALUES\n')
for operator in range(1,args.operators+1):
f.write('("operator{}"){}\n'.format(operator, ';' if operator == args.operators else ','))
f.write('/*!40000 ALTER TABLE `operator_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('LOCK TABLES subscription_pack_dimension WRITE;\n')
f.write('/*!40000 ALTER TABLE `subscription_pack_dimension` DISABLE KEYS */;\n')
f.write('INSERT INTO subscription_pack_dimension (subscription_pack) VALUES\n')
for subscription_pack in range(1, args.subscription_packs+1):
f.write('("subscription_pack{}"){}\n'.format(subscription_pack, ';' if subscription_pack == args.subscription_packs else ','))
f.write('/*!40000 ALTER TABLE `subscription_pack_dimension` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
insert_count = 0
f.write('LOCK TABLES subscribers WRITE;\n')
f.write('/*!40000 ALTER TABLE `subscribers` DISABLE KEYS */;\n')
for subscriber in range(1, args.subscribers+1):
if insert_count == 0:
f.write('INSERT INTO subscribers (name) VALUES\n')
f.write('("{}"){}\n'.format(fake.name(),
';' if insert_count == args.max_inserts or subscriber == args.subscribers else ','))
if insert_count == args.max_inserts or subscriber == args.subscribers:
insert_count = 0
else:
insert_count += 1
f.write('/*!40000 ALTER TABLE `subscribers` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
count = 1
insert_count = 0
count_subscriptions = args.subscribers*args.subscription_packs
f.write('LOCK TABLES subscriptions WRITE;\n')
f.write('/*!40000 ALTER TABLE `subscriptions` DISABLE KEYS */;\n')
for subscription_pack_id in range(1, args.subscription_packs+1):
for subscriber_id in range(1, args.subscribers+1):
if insert_count == 0:
f.write('INSERT INTO subscriptions (subscriber_id, subscription_pack_id, channel_id, time_id, msisdn) VALUES\n')
f.write('({},{},{},{},"{}"){}\n'.format(subscriber_id, subscription_pack_id,
random.randint(1, args.channels),
random.randint(1, max_time_dimension_id),
fake_IN.phone_number(),
';' if insert_count == args.max_inserts or count == count_subscriptions else ','))
if insert_count == args.max_inserts or count == count_subscriptions:
insert_count = 0
else:
insert_count += 1
count += 1
f.write('/*!40000 ALTER TABLE `subscriptions` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
insert_count = 0
count = 1
count_subscriptions = args.subscribers*args.subscription_packs
f.write('LOCK TABLES subscription_status_measure WRITE;\n')
f.write('/*!40000 ALTER TABLE `subscription_status_measure` DISABLE KEYS */;\n')
for subscription_pack_id in range(1, args.subscription_packs+1):
for subscriber_id in range(1, args.subscribers+1):
if insert_count == 0:
f.write('INSERT INTO subscription_status_measure (subscription_id, status, week_number, channel_id, operator_id, subscription_pack_id, remarks, time_id, hour_id) VALUES\n')
f.write('({},"{}",{},{},{},{},{},{},{}),\n'.format(count,
'PENDING',
random.randint(0, 40),
random.randint(1, args.channels),
random.randint(1, args.operators),
subscription_pack_id,
'"' + fake.text(max_nb_chars=200) + '"' if random.randint(0, 5) == 0 else "null",
                                       random.randint(1, max_time_dimension_id//3),  # // keeps randint args integral on Python 3
random.randint(1, count_hours)))
f.write('({},"{}",{},{},{},{},{},{},{}),\n'.format(count,
'ACTIVE',
random.randint(0, 40),
random.randint(1, args.channels),
random.randint(1, args.operators),
subscription_pack_id,
'"' + fake.text(max_nb_chars=200) + '"' if random.randint(0, 5) == 0 else "null",
                                       random.randint(max_time_dimension_id//3,
                                                      2*(max_time_dimension_id//3)),
random.randint(1, count_hours)))
f.write('({},"{}",{},{},{},{},{},{},{}){}\n'.format(count,
random.choice(['DISABLED',
'COMPLETE']),
random.randint(0, 40),
random.randint(1, args.channels),
random.randint(1, args.operators),
subscription_pack_id,
'"' + fake.text(max_nb_chars=200) + '"' if random.randint(0, 5) == 0 else "null",
                                       random.randint(2*(max_time_dimension_id//3),
                                                      max_time_dimension_id),
random.randint(1, count_hours),
';' if insert_count >= args.max_inserts or count == count_subscriptions else ','))
if insert_count >= args.max_inserts or count == count_subscriptions:
insert_count = 0
else:
insert_count += 3
count += 1
f.write('/*!40000 ALTER TABLE `subscription_status_measure` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
insert_count = 0
sub_count = 1
f.write('LOCK TABLES subscriber_call_measure WRITE;\n')
f.write('/*!40000 ALTER TABLE `subscriber_call_measure` DISABLE KEYS */;\n')
for subscription_pack_id in range(1, args.subscription_packs+1):
for subscriber_id in range(1, args.subscribers+1):
subscription_id = sub_count
operator_id = random.randint(1, args.operators)
campaign_id = random.randint(1, args.campaigns)
# I assume half the users will be in the 42 week pack and the other half in the 72
if random.randint(0,1):
min_calls = 42
max_calls = 42 * 3
else:
min_calls = 72
max_calls = 72 * 3
call_count = random.randint(min_calls, max_calls)
for count in range(1, call_count+1):
if insert_count == 0:
f.write('INSERT INTO subscriber_call_measure (subscription_id, call_status, duration, operator_id, subscription_pack_id, percentage_listened, campaign_id, start_date, end_date, start_time, end_time, subscription_status, duration_in_pulse) VALUES\n')
# Some calls may span an hour boundary. I'm not going to model that
start_date = random.randint(1, max_time_dimension_id)
end_date = start_date
start_hour = random.randint(1, count_hours-2)
end_hour = start_hour + 2
f.write('({},"{}",{},{},{},{},{},{},{},{},{},"{}",{}){}\n'
.format(subscription_id,
random.choice(['SUCCESS', 'NOT_ANSWERED', 'SWITCHED_OFF', 'OTHER']),
random.randint(1, 120),
operator_id,
subscription_pack_id,
random.randint(1, 11),
campaign_id,
start_date,
end_date,
start_hour,
end_hour,
                            random.choice(['PENDING', 'ACTIVE', 'DISABLED', 'CANCELLED']),
random.randint(1, 60),
';' if insert_count == args.max_inserts or count == call_count else ','))
if insert_count == args.max_inserts or count == call_count:
insert_count = 0
else:
insert_count += 1
sub_count += 1
f.write('/*!40000 ALTER TABLE `subscriber_call_measure` ENABLE KEYS */;\n')
f.write('UNLOCK TABLES;\n')
f.write('COMMIT;\n')
f.write("""
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET AUTOCOMMIT=@OLD_AUTOCOMMIT */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
""")
|
|
import collections
import operator
import pytest
from pandas.compat import PY36
import pandas as pd
from pandas.tests.extension import base
import pandas.util.testing as tm
from .array import JSONArray, JSONDtype, make_data
@pytest.fixture
def dtype():
return JSONDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
data = make_data()
# Why the while loop? NumPy is unable to construct an ndarray from
# equal-length ndarrays. Many of our operations involve coercing the
# EA to an ndarray of objects. To avoid random test failures, we ensure
# that our data is coercible to an ndarray. Several tests deal with only
# the first two elements, so that's what we'll check.
while len(data[0]) == len(data[1]):
data = make_data()
return JSONArray(data)
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return JSONArray([{}, {'a': 10}])
@pytest.fixture
def data_for_sorting():
return JSONArray([{'b': 1}, {'c': 4}, {'a': 2, 'c': 3}])
@pytest.fixture
def data_missing_for_sorting():
return JSONArray([{'b': 1}, {}, {'a': 4}])
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return operator.eq
@pytest.fixture
def data_for_grouping():
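    # The base extension grouping tests expect the pattern
    # [B, B, NA, NA, A, A, B, C] with A < B < C; here {} plays the role of NA.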
return JSONArray([
{'b': 1}, {'b': 1},
{}, {},
{'a': 0, 'c': 2}, {'a': 0, 'c': 2},
{'b': 1},
{'c': 2},
])
class BaseJSON:
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
def assert_series_equal(self, left, right, **kwargs):
if left.dtype.name == 'json':
assert left.dtype == right.dtype
left = pd.Series(JSONArray(left.values.astype(object)),
index=left.index, name=left.name)
right = pd.Series(JSONArray(right.values.astype(object)),
index=right.index, name=right.name)
tm.assert_series_equal(left, right, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
tm.assert_index_equal(
left.columns, right.columns,
exact=kwargs.get('check_column_type', 'equiv'),
check_names=kwargs.get('check_names', True),
check_exact=kwargs.get('check_exact', False),
check_categorical=kwargs.get('check_categorical', True),
obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame')))
        jsons = left.columns[(left.dtypes == 'json').values]  # only JSON-typed columns
for col in jsons:
self.assert_series_equal(left[col], right[col],
*args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
def test_custom_asserts(self):
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray([collections.UserDict({'a': 1}),
collections.UserDict({'b': 2}),
collections.UserDict({'c': 3})])
a = pd.Series(data)
self.assert_series_equal(a, a)
self.assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
with pytest.raises(AssertionError):
self.assert_series_equal(a, b)
with pytest.raises(AssertionError):
self.assert_frame_equal(a.to_frame(), b.to_frame())
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
@pytest.mark.skip(reason="Different definitions of NA")
def test_stack(self):
"""
The test does .astype(object).stack(). If we happen to have
any missing values in `data`, then we'll end up with different
rows since we consider `{}` NA, but `.astype(object)` doesn't.
"""
@pytest.mark.xfail(reason="dict for NA")
def test_unstack(self, data, index):
# The base test has NaN for the expected NA value.
# this matches otherwise
return super().test_unstack(data, index)
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
unhashable = pytest.mark.skip(reason="Unhashable")
unstable = pytest.mark.skipif(not PY36, # 3.6 or higher
reason="Dictionary order unstable")
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
pass
@unstable
def test_argsort(self, data_for_sorting):
super().test_argsort(data_for_sorting)
@unstable
def test_argsort_missing(self, data_missing_for_sorting):
super().test_argsort_missing(data_missing_for_sorting)
@unstable
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values(self, data_for_sorting, ascending):
super().test_sort_values(data_for_sorting, ascending)
@unstable
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
super().test_sort_values_missing(data_missing_for_sorting, ascending)
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_add(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_first(self, data):
pass
@unhashable
def test_hash_pandas_object_works(self, data, kind):
super().test_hash_pandas_object_works(data, kind)
@pytest.mark.skip(reason="broadcasting error")
def test_where_series(self, data, na_value):
# Fails with
# *** ValueError: operands could not be broadcast together
# with shapes (4,) (4,) (0,)
super().test_where_series(data, na_value)
@pytest.mark.skip(reason="Can't compare dicts.")
def test_searchsorted(self, data_for_sorting):
super().test_searchsorted(data_for_sorting)
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
E TypeError: unhashable type: 'UserDict' with
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
@unstable
@pytest.mark.parametrize('as_index', [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
pass
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="unsupported"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
class TestPrinting(BaseJSON, base.BasePrintingTests):
pass
|
|
#!/usr/bin/env python2
import sys, yaml
from StringIO import StringIO
from Tools.pretty import prettyPrint
from Tools.argue import Argue
args = Argue()
@args.argument(short='v', flag=True)
def verbose():
return False
#_______________________________________________________________
@args.command(name='valuable')
class Valuable(object):
'''
different function names for getter, setter and deleter
posthumous definition of value = property()
'''
_value = 'abc123'
@args.attribute(short='v')
def value(self):
'''
value getter
'''
return self._value
def set_value(self,value):
'''
value setter
'''
self._value = value
def del_value(self):
'''
value deleter
'''
self._value = None
# define the property
value = property(
value,
set_value,
del_value
)
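# Added note: the property() call above is the pre-decorator way of wiring
# accessors; it is equivalent to stacking @property / @value.setter /
# @value.deleter on three methods all named value, which is exactly what the
# Propertly class below demonstrates with the args.property decorator.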
@args.operation
@args.parameter(
name='input',
help='just do it'
)
@args.returns(
type=str,
help='regex version of input'
)
def do(self, input):
'''
class operation
'''
if self.value:
return '^%s$'%self.value
#_______________________________________________________________
@args.command(name='just')
class Propertly(object):
'''
command class
'''
_value = 'abc123'
@args.property(short='V')
def value(self):
'''
value getter
'''
return self._value
@value.setter
def value(self,value):
'''
value setter
'''
self._value = value
@value.deleter
def value(self):
'''
value deleter
'''
self._value = None
@args.operation
@args.parameter(
name='input',
help='just do it'
)
@args.returns(
type=str,
help='regex version of input'
)
def do(self, input):
'''
class operation
'''
if self.value:
return '^%s$'%self.value
#_______________________________________________________________
@args.command(name='duck')
class Duck(object):
'''
command class
'''
@args.property(short='V', default='abc123')
def value(self):
'''
value getter
'''
return
@args.operation
@args.parameter(
name='input',
help='just do it'
)
@args.returns(
type=str,
help='regex version of input'
)
def do(self, input):
'''
class operation
'''
if self.value:
return '^%s$'%self.value
#_______________________________________________________________
@args.command(name='test')
class TestMe(object):
'''
test the Propertly class as a normal class object
'''
@args.operation
def run(self):
'''
create the object then interact with the object
'''
for c in [Valuable, Propertly, Duck]:
if verbose():
output = sys.stdout
else:
output = StringIO()
output.write('%s\n'%c.__name__)
p = c()
# presume default value
output.write('\tget\n\t\tp.value=%s\n'%p.value)
assert('abc123' == p.value)
output.write('\t\tp.do(\'da\')=%s\n'%p.do('da'))
assert('^abc123$' == p.do('da'))
p.value = '321cbs'
output.write('\tset\n\t\tp.value=%s\n'%p.value)
assert('321cbs' == p.value)
output.write('\t\tp.do(\'da\')=%s\n'%p.do('da'))
assert('^321cbs$' == p.do('da'))
del p.value
output.write('\tdelete\n\t\tp.value=%s\n'%p.value)
assert(None == p.value)
output.write('\t\tp.do(\'da\')=%s\n'%p.do('da'))
assert(None == p.do('da'))
del p
#output.close()
return
#_______________________________________________________________
if __name__ == '__main__':
try:
# pythonista on iOS
import console
console.clear()
console.set_font('Menlo', 11)
except Exception:
pass
result = args.execute()
if result:
prettyPrint(yaml.load(result), yaml=True)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from shutil import rmtree
import tempfile
import re
from preggy import expect
from thumbor.config import Config
from thumbor.importer import Importer
from thumbor.context import Context
from tests.base import TestCase
from tests.fixtures.images import (
valid_image, valid_image_path,
too_small_image, too_small_image_path,
too_heavy_image
)
class UploadTestCase(TestCase):
@classmethod
def setUpClass(cls, *args, **kw):
cls.root_path = tempfile.mkdtemp()
cls.base_uri = "/image"
@classmethod
def tearDownClass(cls, *args, **kw):
rmtree(cls.root_path)
@property
def upload_storage(self):
return self.context.modules.upload_photo_storage
def get_path_from_location(self, location):
return "/".join(
location.lstrip('/').rstrip('/').split('/')[1:-1]
)
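# Hedged example (assumed Location value): for a header like
# '/image/0123456789abcdef0123456789abcdef/photo.jpg', the helper above
# drops the leading 'image' segment and the trailing filename, returning
# the 32-character storage id '0123456789abcdef0123456789abcdef'.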
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = False
cfg.UPLOAD_PUT_ALLOWED = False
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
class UploadAPINewFileTestCase(UploadTestCase):
def test_can_post_image_with_content_type(self):
filename = 'new_image_with_a_filename.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg', 'Slug': filename}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_image_with_charset(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg;charset=UTF-8'}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_image_with_unknown_charset(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/thisIsAUnknwonOrBadlyFormedCHarset'}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_image_without_filename(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_from_html_form(self):
filename = 'crocodile2.jpg'
image = ('media', filename, valid_image())
response = self.post_files(self.base_uri, {'Slug': 'another_filename.jpg'}, (image, ))
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
class UploadAPIUpdateFileTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = False
cfg.UPLOAD_PUT_ALLOWED = True
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_can_modify_existing_image(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, too_small_image())
expect(response.code).to_equal(204)
id_should_exist = re.compile(self.base_uri + r'/([^\/]{32})/' + filename).search(location).group(1)
expected_path = self.upload_storage.path_on_filesystem(id_should_exist)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(too_small_image_path)
id_shouldnt_exist = re.compile(self.base_uri + r'/(.*)').search(location).group(1)
expected_path = self.upload_storage.path_on_filesystem(id_shouldnt_exist)
expect(expected_path).not_to_exist()
class UploadAPIUpdateSmallIdFileTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = False
cfg.UPLOAD_PUT_ALLOWED = True
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
cfg.MAX_ID_LENGTH = 36
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_cant_get_truncated_id_when_stored_with_large_id(self):
image_id = 'e5bcf126-791b-4375-9f73-925ab8b9fb5f'
path = '/image/%s' % image_id
response = self.put(path, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(204)
response = self.get(path[:7 + 32], {'Accept': 'image/jpeg'})
expect(response.code).to_equal(404)
def test_can_get_actual_id_when_stored_with_large_id(self):
path = '/image/e5bcf126-791b-4375-9f73-925ab8b9fb5g'
self.put(path, {'Content-Type': 'image/jpeg'}, valid_image())
response = self.get(path + '123456', {'Accept': 'image/jpeg'})
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(valid_image())
class UploadAPIDeleteTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = True
cfg.UPLOAD_PUT_ALLOWED = False
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_can_delete_existing_image(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
image_id = re.compile(self.base_uri + r'/([^\/]{32})/' + filename).search(location).group(1)
image_location = self.upload_storage.path_on_filesystem(image_id)
expect(image_location).to_exist()
response = self.delete(location, {})
expect(response.code).to_equal(204)
expect(image_location).not_to_exist()
def test_deleting_unknown_image_returns_not_found(self):
uri = self.base_uri + '/an/unknown/image'
response = self.delete(uri, {})
expect(response.code).to_equal(404)
class UploadAPIRetrieveTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = True
cfg.UPLOAD_PUT_ALLOWED = False
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_can_retrieve_existing_image(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
response = self.get(location, {'Accept': 'image/jpeg'})
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(valid_image())
expect(response.headers['Content-Type']).to_equal('image/jpeg')
def test_retrieving_unknown_image_returns_not_found(self):
uri = self.base_uri + '/an/unknown/image'
response = self.get(uri, {'Accept': 'image/jpeg'})
expect(response.code).to_equal(404)
class UploadAPIValidationTestCase(UploadTestCase):
'''
Validation:
- Invalid image
- Size constraints
- Weight constraints
'''
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PUT_ALLOWED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
cfg.MIN_WIDTH = 40
cfg.MIN_HEIGHT = 40
cfg.UPLOAD_MAX_SIZE = 72000
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_posting_invalid_image_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, 'invalid image')
expect(response.code).to_equal(415)
def test_posting_invalid_image_through_html_form_fails(self):
image = ('media', u'crocodile9999.jpg', 'invalid image')
response = self.post_files(self.base_uri, {}, (image, ))
expect(response.code).to_equal(415)
def test_modifying_existing_image_to_invalid_image(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, 'invalid image')
expect(response.code).to_equal(415)
expected_path = self.get_path_from_location(location)
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_posting_a_too_small_image_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, too_small_image())
expect(response.code).to_equal(412)
def test_posting_a_too_small_image_from_html_form_fails(self):
image = ('media', u'crocodile9999.jpg', too_small_image())
response = self.post_files(self.base_uri, {}, (image, ))
expect(response.code).to_equal(412)
def test_modifying_existing_image_to_small_image(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, too_small_image())
expect(response.code).to_equal(412)
expected_path = self.get_path_from_location(location)
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_posting_an_image_too_heavy_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, too_heavy_image())
expect(response.code).to_equal(412)
def test_posting_an_image_too_heavy_through_an_html_form_fails(self):
image = ('media', u'oversized9999.jpg', too_heavy_image())
response = self.post_files(self.base_uri, {}, (image, ))
expect(response.code).to_equal(412)
def test_modifying_existing_image_to_heavy_image_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, too_heavy_image())
expect(response.code).to_equal(412)
expected_path = self.get_path_from_location(location)
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
|
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
On other platforms (mainly Mac and Windows), it uses just sys.prefix
(and sys.exec_prefix, if different, but this is unlikely). The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/site-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.3/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.3/site-packages/bar
/usr/local/lib/python2.3/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys, os
def makepath(*paths):
dir = os.path.abspath(os.path.join(*paths))
return dir, os.path.normcase(dir)
for m in sys.modules.values():
if hasattr(m, "__file__") and m.__file__:
m.__file__ = os.path.abspath(m.__file__)
del m
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
_dirs_in_sys_path = {}
dir = dircase = None # sys.path may be empty at this point
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in _dirs_in_sys_path:
L.append(dir)
_dirs_in_sys_path[dircase] = 1
sys.path[:] = L
del dir, dircase, L
# Append ./build/lib.<platform> in case we're running in the build dir
# (especially for Guido :-)
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
del get_platform, s
def _init_pathinfo():
global _dirs_in_sys_path
_dirs_in_sys_path = d = {}
for dir in sys.path:
if dir and not os.path.isdir(dir):
continue
dir, dircase = makepath(dir)
d[dircase] = 1
def addsitedir(sitedir):
global _dirs_in_sys_path
if _dirs_in_sys_path is None:
_init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in _dirs_in_sys_path:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name[-4:] == os.extsep + "pth":
addpackage(sitedir, name)
if reset:
_dirs_in_sys_path = None
def addpackage(sitedir, name):
global _dirs_in_sys_path
if _dirs_in_sys_path is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname)
except IOError:
return
while 1:
dir = f.readline()
if not dir:
break
if dir[0] == '#':
continue
if dir.startswith("import"):
exec dir
continue
if dir[-1] == '\n':
dir = dir[:-1]
dir, dircase = makepath(sitedir, dir)
if not dircase in _dirs_in_sys_path and os.path.exists(dir):
sys.path.append(dir)
_dirs_in_sys_path[dircase] = 1
if reset:
_dirs_in_sys_path = None
prefixes = [os.path.join(sys.prefix, "local"), sys.prefix]
sitedir = None # make sure sitedir is initialized because of later 'del'
if sys.exec_prefix != sys.prefix:
prefixes.append(os.path.join(sys.exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")]
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir)
del prefix, sitedir
_dirs_in_sys_path = None
# Define new built-ins 'quit' and 'exit'.
# These are simply strings that display a hint on how to exit.
if os.sep == ':':
exit = 'Use Cmd-Q to quit.'
elif os.sep == '\\':
exit = 'Use Ctrl-Z plus Return to exit.'
else:
exit = 'Use Ctrl-D (i.e. EOF) to exit.'
import __builtin__
__builtin__.quit = __builtin__.exit = exit
del exit
# interactive prompt objects for printing the license text, a list of
# contributors and the copyright notice.
class _Printer:
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for file in self.__files:
file = os.path.join(dir, file)
try:
fp = open(file)
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
__builtin__.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
__builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
# Define new built-in 'help'.
# This is a wrapper around pydoc.help (with a twist).
class _Helper:
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
__builtin__.help = _Helper()
# On Windows, some default encodings are not provided
# by Python (e.g. "cp932" in Japanese locale), while they
# are always available as "mbcs" in each locale.
# Make them usable by aliasing to "mbcs" in such a case.
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
# Set the string encoding used by the Unicode implementation. The
# default is 'ascii', but if you're willing to experiment, you can
# change this.
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
#
# Run custom site specific code, if available.
#
try:
import sitecustomize
except ImportError:
pass
#
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
#
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
def _test():
print "sys.path = ["
for dir in sys.path:
print " %s," % `dir`
print "]"
if __name__ == '__main__':
_test()
|
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class for issuing git commands."""
# pylint: disable=logging-format-interpolation
import collections
import logging
import os
import re
import tempfile
import time
# pylint: disable=no-name-in-module
# pylint: disable=import-error
from distutils.version import LooseVersion
import yaml
from buildtool import (
add_parser_argument,
check_kwargs_empty,
check_subprocess,
ensure_dir_exists,
log_embedded_output,
run_subprocess,
raise_and_log_error,
ConfigError,
ExecutionError,
UnexpectedError)
class GitRepositorySpec(object):
"""A reference to a git repository with local and origin locations.
Attributes:
name: [string] The shorthand name of the repository
git_dir: [path] The local path the repository was cloned to, if any.
origin: [url] The url the local_path was cloned from, if known.
upstream: [url] The url the origin is refreshed from, if known.
"""
@property
def name(self):
"""The short name for the repository."""
return self.__name
@property
def git_dir(self):
"""The path to the local repository the origin was cloned to."""
if not self.__git_dir:
raise_and_log_error(
ConfigError('{0} does not specify a git_dir'.format(self)))
return self.__git_dir
@property
def origin(self):
"""The origin URL."""
if not self.__origin:
raise_and_log_error(
ConfigError('{0} does not specify an origin'.format(self)))
return self.__origin
@property
def upstream(self):
"""The upstream URL."""
if not self.__upstream:
raise_and_log_error(
ConfigError('{0} does not specify an upstream'.format(self)))
return self.__upstream
def __init__(self, name, **kwargs):
"""Create a new instance."""
self.__name = name
self.__git_dir = kwargs.pop('git_dir', None)
self.__origin = kwargs.pop('origin', None)
self.__upstream = kwargs.pop('upstream', None)
self.__commit = kwargs.pop('commit_id', None)
self.__branch = kwargs.pop('branch', None)
check_kwargs_empty(kwargs)
def branch_or_none(self):
"""Returns specific branch or None."""
return self.__branch
def commit_or_none(self):
"""Returns specific commit or None."""
return self.__commit
def git_dir_or_none(self):
"""Returns local git_dir path, which might be None."""
return self.__git_dir or None
def origin_or_none(self):
"""Returns origin URL, which might be None."""
return self.__origin or None
def upstream_or_none(self):
"""Returns upstream URL, which might be None."""
return self.__upstream or None
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'git_dir={git_dir} origin={origin} upstream={upstream}'.format(
git_dir=self.__git_dir,
origin=self.__origin,
upstream=self.__upstream)
def __lt__(self, other):
if self.__name != other.name:
return self.__name < other.name
# a partial ordering would be fine, but we want strict __eq__ for testing
self_hash = hash((self.__git_dir, self.__origin, self.__upstream))
other_hash = hash((other.git_dir, other.origin, other.upstream))
return self_hash < other_hash
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __eq__(self, other):
return (self.__name == other.name
and self.__git_dir == other.git_dir_or_none()
and self.__origin == other.origin_or_none()
and self.__upstream == other.upstream_or_none())
def __ne__(self, other):
return not self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
def __gt__(self, other):
if self.__name != other.name:
return self.__name > other.name
# a partial ordering would be fine, but we want strict __eq__ for testing
self_hash = hash((self.__git_dir, self.__origin, self.__upstream))
other_hash = hash((other.git_dir, other.origin, other.upstream))
return self_hash > other_hash
class SemanticVersion(
collections.namedtuple('SemanticVersion',
['series_name', 'major', 'minor', 'patch'])):
"""Helper class for interacting with semantic version tags."""
SEMVER_MATCHER = re.compile(r'(.+)-(\d+)\.(\d+)\.(\d+)')
TAG_INDEX = 0
MAJOR_INDEX = 1
MINOR_INDEX = 2
PATCH_INDEX = 3
@staticmethod
def make(tag):
"""Create a new SemanticVersion from the given tag instance.
Args:
tag: [string] in the form <series_name>-<major>.<minor>.<patch>
"""
match = SemanticVersion.SEMVER_MATCHER.match(tag)
if match is None:
raise_and_log_error(UnexpectedError('Malformed tag "%s"' % tag))
# Keep first group as a string, but use integers for the component parts
return SemanticVersion(match.group(1),
*[int(num) for num in match.groups()[1:]])
@staticmethod
def compare(semver, other):
"""Compare this to another semver.
Returns:
< 0 if this is before, 0 if equal, > 0 if this is after.
"""
if semver.series_name != other.series_name:
raise ValueError('Cannot compare different SemVer series.')
if semver.major != other.major:
return semver.major - other.major
if semver.minor != other.minor:
return semver.minor - other.minor
return semver.patch - other.patch
def most_significant_diff_index(self, arg):
"""Returns the *_INDEX for the most sigificant component differnce."""
if arg.series_name != self.series_name:
return self.TAG_INDEX
if arg.major != self.major:
return self.MAJOR_INDEX
if arg.minor != self.minor:
return self.MINOR_INDEX
if arg.patch != self.patch:
return self.PATCH_INDEX
return None
def to_version(self):
"""Return string encoding of underlying version number."""
return '{major}.{minor}.{patch}'.format(
major=self.major, minor=self.minor, patch=self.patch)
def to_tag(self):
"""Return string encoding of SemanticVersion tag."""
return '{series}-{major}.{minor}.{patch}'.format(
series=self.series_name,
major=self.major, minor=self.minor, patch=self.patch)
def to_release_branch(self):
"""Return release branch name for this SemanticVersion."""
return 'release-{major}.{minor}.x'.format(
major=self.major, minor=self.minor)
def next(self, at_index):
"""Returns the next SemanticVersion from this when bumping up.
Args:
at_index: [int] The component *_INDEX to bump at.
"""
if at_index is None:
raise_and_log_error(UnexpectedError('Invalid index={0}'.format(at_index)))
major = self.major
minor = self.minor
patch = self.patch
if at_index == self.PATCH_INDEX:
patch += 1
else:
patch = 0
if at_index == self.MINOR_INDEX:
minor += 1
elif at_index == self.MAJOR_INDEX:
minor = 0
major += 1
else:
raise_and_log_error(
UnexpectedError('Invalid index={0}'.format(at_index)))
return SemanticVersion(self.series_name, major, minor, patch)
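# Illustrative sketch (assumed tag values, not taken from a real build):
#   sv = SemanticVersion.make('master-1.2.3')
#   sv.next(SemanticVersion.MINOR_INDEX).to_tag()     # 'master-1.3.0'
#   sv.next(SemanticVersion.PATCH_INDEX).to_version() # '1.2.4'
#   sv.to_release_branch()                            # 'release-1.2.x'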
class CommitTag(
collections.namedtuple('CommitTag', ['commit_id', 'tag', 'version'])):
"""Denotes an individual result of git show-ref --tags."""
@staticmethod
def make(ref_line):
"""Create a new instance from a response line.
Args:
ref_line: [string] Response from "git show-ref --tags"
"""
# ref_line is in the form "$commit_id refs/tags/$tag"
tokens = ref_line.split(' ')
line_id = tokens[0]
tag_parts = tokens[1].split('/')
tag = tag_parts[len(tag_parts) - 1]
version = LooseVersion(tag)
return CommitTag(line_id, tag, version)
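# Hedged example (assumed ref line): 'abc123 refs/tags/version-1.2.3'
# parses into CommitTag(commit_id='abc123', tag='version-1.2.3',
# version=LooseVersion('version-1.2.3')).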
@staticmethod
def compare_tags(first, second):
"""Comparator for instances compares the lexical order of the tags."""
return cmp(first.version, second.version)
# pylint: disable=multiple-statements
def __cmp__(self, other): return self.version.__cmp__(other.version)
def __lt__(self, other): return self.version < other.version
def __le__(self, other): return self.version <= other.version
def __eq__(self, other): return self.version == other.version
def __ge__(self, other): return self.version >= other.version
def __gt__(self, other): return self.version > other.version
def __ne__(self, other): return self.version != other.version
class CommitMessage(
collections.namedtuple('CommitMessage',
['commit_id', 'author', 'date', 'message'])):
"""Denotes an individual entry in 'git log --pretty'."""
_MEDIUM_PRETTY_COMMIT_MATCHER = re.compile(
'(.+)\n(?:Merge: .*?\n)?Author: *(.+)\nDate: *(.*)\n', re.MULTILINE)
_EMBEDDED_COMMIT_MATCHER = re.compile(
r'^( *)commit [a-f0-9]+\n'
r'^\s*Author: .+\n'
r'^\s*Date: .+\n',
re.MULTILINE)
_EMBEDDED_SUMMARY_MATCHER = re.compile(
r'^\s*(?:\*\s*)?[a-z]+\(.+?\): .+',
re.MULTILINE)
# The vocabulary in the following list was taken looking at what's
# used in practice (right or wrong), for conforming log entries,
# using the following command over the various spinnaker repositories:
#
# git log --pretty=oneline \
# | sed "s/^[a-f0-9]\+ //g" \
# | egrep "^[^ \(]+\(" \
# | sed "s/^\([^\(]\+\).*/\1/g" \
# | sort | uniq
DEFAULT_PATCH_REGEXS = [
# Some tags indicate only a patch release.
re.compile(r'^\s*'
r'(?:\*\s+)?'
r'((?:fix|bug|chore|docs?|test)[\(:].*)',
re.MULTILINE)
]
DEFAULT_MINOR_REGEXS = [
# Some tags indicate a minor release.
# These are features as well as hints to non-trivial
# implementation changes that suggest a higher level of risk.
re.compile(r'^\s*'
r'(?:\*\s+)?'
r'((?:feat|feature|refactor|perf|config)[\(:].*)',
re.MULTILINE)
]
DEFAULT_MAJOR_REGEXS = [
# Breaking changes are explicitly marked as such.
re.compile(r'^\s*'
r'(.*?BREAKING CHANGE.*)',
re.MULTILINE)
]
@staticmethod
def make_list_from_result(response_text):
"""Returns a list of CommitMessage from the command response.
Args:
response_text: [string] result of "git log --pretty=medium"
"""
all_entries = ('\n' + response_text.strip()).split('\ncommit ')[1:]
response = []
for entry in all_entries:
response.append(CommitMessage.make(entry))
return response
@staticmethod
def make(entry):
"""Create a new CommitMessage from an individual entry"""
match = CommitMessage._MEDIUM_PRETTY_COMMIT_MATCHER.match(entry)
if match is None:
raise_and_log_error(
UnexpectedError('Unexpected commit entry {0}'.format(entry)))
text = entry[match.end(3):]
# strip trailing spaces on each line
lines = [line.rstrip() for line in text.split('\n')]
# remove blank lines from beginning and end of text
while lines and not lines[0]:
del lines[0]
while lines and not lines[-1]:
del lines[-1]
# new string may have initial spacing but no leading/trailing blank lines.
text = '\n'.join(lines)
return CommitMessage(match.group(1), match.group(2), match.group(3), text)
@staticmethod
def normalize_message_list(msg_list):
"""Transform a series of CommitMessage into one without compound messages.
A compound message is a single commit that is a merge of multiple commits
as indicated by a message that looks like multiple entries.
This will break apart those compound commits into individual ones for
the same commit id entry for easier processing.
"""
msg_list = CommitMessage._unpack_embedded_commits(msg_list)
return CommitMessage._unpack_embedded_summaries(msg_list)
@staticmethod
def _unpack_embedded_commits(msg_list):
"""Helper function looking for merged commits.
These are indicated by having an embedded commit within them.
If found, unnest the indentation and run through as if these
came directly from a git result to turn them into additional commits.
"""
result = []
for commit_message in msg_list:
text = commit_message.message
found = CommitMessage._EMBEDDED_COMMIT_MATCHER.search(text)
if not found:
result.append(commit_message)
continue
text_before = text[:found.start(1)]
text_lines = text[found.start(1):].split('\n')
pruned_lines = []
offset = found.end(1) - found.start(1)
for line in text_lines:
if line.startswith(found.group(1)):
pruned_lines.append(line[offset:])
elif not line:
pruned_lines.append(line)
else:
logging.warning(
'"%s" looks like an composite commit, but not indented by %d.',
text, offset)
pruned_lines = []
result.append(commit_message)
break
if pruned_lines:
if text_before.strip():
logging.info('Dropping commit message "%s" in favor of "%s"',
text_before, '\n'.join(pruned_lines))
result.extend(
CommitMessage.make_list_from_result('\n'.join(pruned_lines)))
return result
@staticmethod
def _unpack_embedded_summaries(msg_list):
"""Helper function looking for embedded summaries.
An embedded summary is another top-line message block (e.g. fix(...): ...)
This is different from a merged commit in that it doesn't have the
commit/author/date fields. These types of entries are typically entered
manually to convey that a single atomic commit contains multiple changes,
as opposed to multiple commits becoming aggregated after the fact.
Note that embedded summaries all originated from an atomic commit, so
all the resulting CommitMessages will have the same underlying commit id.
"""
result = []
for commit_message in msg_list:
commit_id = commit_message.commit_id
author = commit_message.author
date = commit_message.date
lines = commit_message.message.split('\n')
prev = -1
for index, line in enumerate(lines):
if CommitMessage._EMBEDDED_SUMMARY_MATCHER.match(line):
logging.debug('Found embedded commit at line "%s" prev=%d',
line, prev)
if prev >= 0:
text = '\n'.join(lines[prev:index]).rstrip()
result.append(CommitMessage(commit_id, author, date, text))
prev = index
if prev < 0:
prev = 0
text = '\n'.join(lines[prev:]).rstrip()
result.append(CommitMessage(commit_id, author, date, text))
return result
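# Hedged example (assumed message text): a single commit whose message is
#   'fix(api): handle nulls\nfeat(ui): add button'
# normalizes into two CommitMessage entries sharing one commit_id, one per
# summary line, so each can be scored for semver implications independently.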
@staticmethod
def determine_semver_implication_on_list(
msg_list, major_regexs=None, minor_regexs=None, patch_regexs=None,
default_semver_index=SemanticVersion.MINOR_INDEX):
"""Determine the worst case semvar component that needs incremented."""
if not msg_list:
return None
msi = SemanticVersion.PATCH_INDEX + 1
for commit_message in msg_list:
msi = min(msi, commit_message.determine_semver_implication(
major_regexs=major_regexs,
minor_regexs=minor_regexs,
patch_regexs=patch_regexs,
default_semver_index=default_semver_index))
return msi
def determine_semver_implication(
self, major_regexs=None, minor_regexs=None, patch_regexs=None,
default_semver_index=SemanticVersion.MINOR_INDEX):
"""Determine what effect this commit has on future semantic versioning.
Args:
major_regexs: [list of re] Regexes to look for indicating a MAJOR change
minor_regexs: [list of re] Regexes to look for indicating a MINOR change
patch_regexs: [list of re] Regexes to look for indicating a PATCH change
default_semver_index: SemanticVersion.*_INDEX for default change
Returns:
The SemanticVersion.*_INDEX of the affected idealized version component
that will need to be incremented to accommodate this change.
"""
def is_compliant(spec):
"""Determine if the commit message satisfies the specification."""
if not spec:
return None
if not isinstance(spec, list):
spec = [spec]
text = self.message.strip()
for matcher in spec:
match = matcher.search(text)
if match:
return match.groups()
return None
if major_regexs is None:
major_regexs = CommitMessage.DEFAULT_MAJOR_REGEXS
if minor_regexs is None:
minor_regexs = CommitMessage.DEFAULT_MINOR_REGEXS
if patch_regexs is None:
patch_regexs = CommitMessage.DEFAULT_PATCH_REGEXS
attempts = [('MAJOR', major_regexs, SemanticVersion.MAJOR_INDEX),
('MINOR', minor_regexs, SemanticVersion.MINOR_INDEX),
('PATCH', patch_regexs, SemanticVersion.PATCH_INDEX)]
for name, regexes, index in attempts:
reason = is_compliant(regexes)
if reason:
logging.debug('Commit is considered "%s" because it says "%s"',
name, reason)
return index
logging.debug('Commit is considered #%d by DEFAULT: message was "%s"',
default_semver_index, self.message)
return default_semver_index
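# Hedged examples (assumed messages, using the default regex lists above):
#   'fix(core): null check'     -> SemanticVersion.PATCH_INDEX
#   'feat(api): new endpoint'   -> SemanticVersion.MINOR_INDEX
#   'refactor: BREAKING CHANGE' -> SemanticVersion.MAJOR_INDEX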
def to_yaml(self):
"""Convert the summary to a yaml string."""
data = dict(self._asdict())
return yaml.safe_dump(data, default_flow_style=False)
def _asdict(self):
"""Override broken method in some Python3
https://bugs.python.org/issue24931
"""
return collections.OrderedDict([
('commit_id', self.commit_id),
('author', self.author),
('date', self.date),
('message', self.message)
])
class RepositorySummary(collections.namedtuple(
'RepositorySummary',
['commit_id', 'tag', 'version', 'prev_version', 'commit_messages'])):
"""Denotes information about a repository that a build-delta wants.
Attributes:
commit_id: [string] The HEAD commit id
tag: [string] The tag at the HEAD
version: [string] The Major.Minor.Patch version number
commit_messages: [list of CommitMessage] The commits since the last tag.
If this is empty then the tag and version already exists.
Otherwise the tag and version are proposed values.
"""
@staticmethod
def from_dict(content):
"""Construct from yaml dictionary."""
data = dict(content)
data['commit_messages'] = [CommitMessage(**raw)
for raw in data['commit_messages']]
return RepositorySummary(**data)
@property
def patchable(self):
"""Return True if the changes in this repository is only a patch release."""
previous_parts = self.prev_version.split('.')
current_parts = self.version.split('.')
if len(previous_parts) != 3:
raise_and_log_error(
ConfigError('Previous version %s is not X.Y.Z' % self.prev_version))
if len(current_parts) != 3:
raise_and_log_error(
ConfigError('Version %s is not X.Y.Z' % self.version))
if previous_parts[:2] != current_parts[:2]:
return False
if int(previous_parts[2]) != int(current_parts[2]) - 1:
raise_and_log_error(
UnexpectedError(
'Unexpected version sequence {prev} to {current}'.format(
prev=self.prev_version, current=self.version)))
return True
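# Hedged examples (assumed versions): prev_version '1.2.3' with version
# '1.2.4' is patchable; '1.2.3' -> '1.3.0' returns False (minor bump);
# '1.2.3' -> '1.2.5' raises because patch numbers must be sequential.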
def to_yaml(self, with_commit_messages=True):
"""Convert the summary to a yaml string."""
data = dict(self._asdict())
if with_commit_messages:
data['commit_messages'] = [dict(m._asdict())
for m in data['commit_messages']]
else:
del data['commit_messages']
return yaml.safe_dump(data, default_flow_style=False)
def _asdict(self):
"""Override broken method in some Python3
https://bugs.python.org/issue24931
"""
return collections.OrderedDict([
('commit_id', self.commit_id),
('tag', self.tag),
('version', self.version),
('prev_version', self.prev_version),
('commit_messages', self.commit_messages)
])
class GitRunner(object):
"""Helper class for interacting with Git"""
__GITHUB_TOKEN = None
@staticmethod
def add_parser_args(parser, defaults):
"""Add standard parser options used by GitRunner."""
if hasattr(parser, 'added_git'):
return
parser.added_git = True
add_parser_argument(
parser, 'github_owner', defaults, None,
help='Github repository owner whose repositories we should'
' be operating on.')
add_parser_argument(
parser, 'github_pull_ssh', defaults, False, type=bool,
help='If True, github pull origin uses ssh rather than https.'
' Pulls are https by default since the standard repos are public.')
add_parser_argument(
parser, 'github_filesystem_root', defaults, None,
help='If set, then use this file path as the base origin root and'
' assume all the git repositories live under it. This is only'
' intended to support testing.')
add_parser_argument(
parser, 'github_push_ssh', defaults, True, type=bool,
help='If False, github push origin uses https rather than ssh.'
' Pushes are ssh by default for enhanced security over https.')
add_parser_argument(
parser, 'github_disable_upstream_push', defaults, False, type=bool,
help='If True then disable upstream git pushes in local repos.'
' This is intended as a safety mechanism for testing.')
add_parser_argument(
parser, 'git_allow_no_baseline_tag', defaults, True, type=bool,
help='If True then do not require a baseline tag when searching back'
' from a commit to the previous version. Normally this would not'
' be allowed.')
@staticmethod
def add_publishing_parser_args(parser, defaults):
"""Add standard parser options used when pushing changes with GitRunner."""
if hasattr(parser, 'added_publishing'):
return
parser.added_publishing = True
add_parser_argument(
parser, 'git_allow_publish_master_branch', defaults, True,
help='If false then push to a version-specific branch'
' rather than "master" so it can be reviewed.')
add_parser_argument(
parser, 'git_never_push', defaults, False, type=bool,
help='Disable pushing to git.')
@staticmethod
def normalize_repo_url(url):
"""Normalize a repo url for purposes of checking equality.
Returns:
Either a tuple (HOST, OWNER, PATH) if url is a github-like URL,
assumed to be in the form <PROTOCOL>://<HOST>/<OWNER>/<PATH>
or git@<HOST>:<OWNER>/<REPO>;
in these cases, a '.git' REPO suffix is considered superfluous.
Otherwise the absolute path string, assuming the url is a local path.
"""
dot_git = '.git'
gitless_url = (url[:-len(dot_git)]
if url.endswith(dot_git)
else url)
# e.g. http://github.com/USER/REPO
match = re.match(r'[a-z0-9]+://([^/]+)/([^/]+)/(.+)', gitless_url)
if not match:
# e.g. git@github.com:USER/REPO
match = re.match(r'git@([^:]+):([^/]+)/(.+)', gitless_url)
if match:
return match.groups()
return os.path.abspath(url)
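# Hedged example (assumed URLs): both 'https://github.com/owner/repo.git'
# and 'git@github.com:owner/repo' normalize to the tuple
# ('github.com', 'owner', 'repo'), which is how is_same_repo() below
# treats them as equal.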
@staticmethod
def is_same_repo(first, second):
"""Determine if two URLs refer to the same github repo."""
normalized_first = GitRunner.normalize_repo_url(first)
normalized_second = GitRunner.normalize_repo_url(second)
return normalized_first == normalized_second
@staticmethod
def stash_and_clear_auth_env_vars():
"""Remove git auth variables from global environment; keep internally."""
if 'GITHUB_TOKEN' in os.environ:
GitRunner.__GITHUB_TOKEN = os.environ['GITHUB_TOKEN']
del os.environ['GITHUB_TOKEN']
@staticmethod
def make_https_url(host, owner, repo):
"""Return github https url."""
return 'https://{host}/{owner}/{repo}'.format(
host=host, owner=owner, repo=repo)
@staticmethod
def make_ssh_url(host, owner, repo):
"""Return github https url."""
return 'git@{host}:{owner}/{repo}'.format(
host=host, owner=owner, repo=repo)
@property
def options(self):
"""Return bound options."""
return self.__options
def __init__(self, options):
self.__options = options
self.__auth_env = {}
if GitRunner.__GITHUB_TOKEN:
self.__auth_env['GITHUB_TOKEN'] = GitRunner.__GITHUB_TOKEN
def __inject_auth(self, keyword_args_to_modify):
"""Inject the configured git authentication environment variables.
Args:
keyword_args_to_modify: [dict]
The kwargs dictionary that will be passed to the subprocess is
modified to inject additional authentication variables,
if configured to do so.
"""
if not self.__auth_env:
return
new_env = dict(keyword_args_to_modify.get('env', os.environ))
new_env.update(self.__auth_env)
keyword_args_to_modify['env'] = new_env
def run_git(self, git_dir, command, **kwargs):
"""Wrapper around run_subprocess."""
self.__inject_auth(kwargs)
return run_subprocess(
'git -C "{dir}" {command}'.format(dir=git_dir, command=command),
**kwargs)
def check_run(self, git_dir, command, **kwargs):
"""Wrapper around check_subprocess."""
self.__inject_auth(kwargs)
return check_subprocess(
'git -C "{dir}" {command}'.format(dir=git_dir, command=command),
**kwargs)
def check_run_sequence(self, git_dir, commands):
"""Check a sequence of git commands.
Args:
git_dir: [path] All the commands refer to this local repository.
commands: [list of string] To pass to check_run.
"""
for cmd in commands:
self.check_run(git_dir, cmd)
def check_commit_or_no_changes(self, git_dir, commit_commandline_args):
"""A variant of check_run 'commit' that tolerates 'no changes' errors."""
retcode, stdout = self.run_git(
git_dir, 'commit ' + commit_commandline_args)
if retcode == 1:
last_line = stdout.split('\n')[-1]
if last_line.lower().find('nothing to commit') >= 0:
logging.debug('No changes to commit -- raw changelog is unchanged.')
return stdout
log_embedded_output(logging.ERROR, 'command output', stdout)
raise_and_log_error(ExecutionError('git failed.'))
return stdout
def find_newest_tag_and_common_commit_from_id(
self, git_dir, commit_id, commit_tags):
"""Returns most recent tag and common commit to a given commit_id.
So if we have this:
<tag 0.1.0>
|
A - B - C <tag 0.2.0> - ...
|
X
|
Y - Z <id>
Then we want for <id> to get the tag <0.2.0> and commit A.
We'll use this to know that the changes since the tag are X, Y, Z,
and be able to determine the new semantic version tag based on 0.2.0 even
though it is not directly in <id>'s hierarchy.
Args:
git_dir: [path] The local repository to examine.
commit_id: [string] The commit to find the most recent baseline tag for.
commit_tags: [list of CommitTag] The candidate tags to consider.
"""
# Find the starting commit, which is most recent tag in our direct history.
# For the example in the function docs, this would be tag 0.1.0
retcode, most_recent_ancestor_tag = self.run_git(
git_dir, 'describe --abbrev=0 --tags --match version-* ' + commit_id)
if retcode != 0:
start_tag = 'version-0.0.0'
logging.warning('No baseline tag for "%s", assuming this is first one.',
git_dir)
start_commit = self.check_run(git_dir, 'rev-list --max-parents=0 HEAD')
else:
start_tag = most_recent_ancestor_tag
start_commit = self.check_run(git_dir, 'rev-list -n 1 ' + start_tag)
if start_commit == commit_id:
logging.debug(
'Commit %s is already tagged with %s', start_commit, start_tag)
return start_tag, start_commit
# Get the master commit so we can use it in the merge-base call below.
# If we checked out some branch other than master, we might not have
# the actual branch so cannot use the symbolic name.
master_commit = self.check_run(git_dir, 'show-ref master').split(' ')[0]
# Find branch our commit is on. There could be multiple branches.
# We'll remember them all. These should be the same in practice, but
# could be different if a branch spawned another for some reason.
commit_branches = self.check_run(
git_dir, 'branch --contains {id}'.format(id=commit_id))
commit_branch_nodes = set([])
for commit_branch in commit_branches.split('\n'):
if commit_branch.startswith('*'):
commit_branch = commit_branch[1:].strip()
if commit_branch.startswith('('):
# Skip detached branches from when we checkout specific commits
continue
# Find the place our branch diverges from master. We'll use this to
# detect whether a tag we consider was created after our branch, by
# checking whether its merge-base with our commit is one of these nodes.
commit_branch_nodes.add(self.check_run(
git_dir, 'merge-base {branch} {master}'.format(
branch=commit_branch, master=master_commit)))
logging.debug('Initial tag id=%s branch=%s which diverged @ %s with tag=%s.',
commit_id, commit_branches, commit_branch_nodes, start_tag)
# Now there could be other versions that were created in branches between
# that first commit and our commit, such as tag 0.2.0 in the above.
start_version = LooseVersion(start_tag)
for tag_entry in reversed(sorted(commit_tags)):
tag = tag_entry.tag
if LooseVersion(tag) <= start_version:
logging.debug('tag %s <= %s', tag, start_tag)
break
# Find where in our commit history the branch this tag is on intersects
tag_intersect = self.check_run(git_dir, 'merge-base {id} {tag}'.format(
id=commit_id, tag=tag))
if tag_intersect in commit_branch_nodes:
logging.debug('tag %s intersects branch at %s', tag, tag_intersect)
continue
logging.debug('Found newer tag=%s at intersect id=%s', tag, tag_intersect)
return tag, tag_intersect
return start_tag, start_commit
def query_local_repository_commits_to_existing_tag_from_id(
self, git_dir, commit_id, commit_tags, base_commit_id=None):
"""Returns the list of commit messages to the local repository."""
# pylint: disable=invalid-name
tag, found_commit = self.find_newest_tag_and_common_commit_from_id(
git_dir, commit_id, commit_tags)
base_commit = base_commit_id or found_commit
commit_history = self.check_run(
git_dir,
'log --pretty=medium {base_commit}..{id}'.format(
base_commit=base_commit, id=commit_id))
messages = CommitMessage.make_list_from_result(commit_history)
return tag, messages
def query_commit_at_tag(self, git_dir, tag):
"""Return the commit for the given tag, or None if tag is not known."""
retcode, stdout = self.run_git(git_dir, 'show-ref -- ' + tag)
if retcode != 0:
return None
lines = stdout.split('\n')
if len(lines) != 1:
raise_and_log_error(
UnexpectedError('"{tag}" -> "{msg}"'.format(tag=tag, msg=stdout)))
return stdout.split(' ')[0]
def query_local_repository_commit_id(self, git_dir):
"""Returns the current commit for the repository at git_dir."""
result = self.check_run(git_dir, 'rev-parse HEAD')
return result
def query_remote_repository_commit_id(self, url, branch):
"""Returns the current commit for the remote repository."""
args = {}
self.__inject_auth(args)
result = check_subprocess('git ls-remote %s %s' % (url, branch), **args)
return result.split('\t')[0]
def query_local_repository_branch(self, git_dir):
"""Returns the branch for the repository at git_dir."""
returncode, stdout = self.run_git(git_dir, 'rev-parse --abbrev-ref HEAD')
if returncode:
raise_and_log_error(
ExecutionError('Could not determine branch', program='git'),
'Could not determine branch in {dir}: {output}'.format(
dir=git_dir, output=stdout))
return stdout
def delete_branch_on_origin(self, git_dir, branch):
if self.options.git_never_push:
logging.warning(
'SKIP deleting branch because --git_never_push=true.'
'\nCommand would have been: %s',
'git -C "{dir}" push origin --delete {branch}'.format(
dir=git_dir, branch=branch))
return
logging.warning('Deleting origin branch="%s" for %s', branch, git_dir)
self.check_run(git_dir, 'push origin --delete ' + branch)
def push_branch_to_origin(self, git_dir, branch, force=False):
"""Push the given local repository back up to the origin.
This has no effect if the repository is not in the given branch.
"""
if self.options.git_never_push:
logging.warning(
'SKIP pushing branch because --git_never_push=true.'
'\nCommand would have been: %s',
'git -C "{dir}" push origin {branch}'.format(
dir=git_dir, branch=branch))
return
in_branch = self.query_local_repository_branch(git_dir)
if in_branch != branch:
logging.warning('Skipping push %s "%s" to origin because branch is "%s".',
git_dir, branch, in_branch)
return
force_flag = ' -f' if force else ''
self.check_run(git_dir, 'push origin ' + branch + force_flag)
def push_tag_to_origin(self, git_dir, tag):
"""Push the given tag back up to the origin."""
if self.options.git_never_push:
logging.warning(
'SKIP pushing tag because --git_never_push=true.'
'\nCommand would have been: %s',
'git -C "{dir}" push origin {tag}'.format(dir=git_dir, tag=tag))
return
logging.debug('Pushing tag "%s" and pushing to origin in %s', tag, git_dir)
self.check_run(git_dir, 'push origin ' + tag)
def fetch_tags(self, git_dir, remote_name='origin'):
"""Fetches the tags in the given remote as a list.
Args:
git_dir: [string] Which local repository to update.
remote_name: [remote_name] Which remote repository to pull from.
Returns:
A list of tags.
"""
logging.debug('Fetching tags for %s from remote %s', git_dir, remote_name)
self.check_run(git_dir, 'fetch --tags')
raw_tags = self.check_run(git_dir, 'tag')
return [s.strip() for s in raw_tags.split('\n')]
def refresh_local_repository(self, git_dir, remote_name, branch):
"""Refreshes the given local repository from the remote one.
Args:
git_dir: [string] Which local repository to update.
remote_name: [remote_name] Which remote repository to pull from.
branch: [string] Which branch to pull.
"""
repository = self.determine_git_repository_spec(git_dir)
if remote_name == 'upstream' and not repository.upstream:
logging.warning(
'Skipping pull {remote_name} {branch} in {repository} because'
' it does not have a remote "{remote_name}"'
.format(remote_name=remote_name,
branch=branch,
repository=repository.name))
return
local_branch = self.query_local_repository_branch(git_dir)
if local_branch != branch:
logging.warning(
'Skipping pull {remote_name} {branch} in {repository} because'
' it is in branch={local_branch}'
.format(remote_name=remote_name,
branch=branch,
repository=repository.name,
local_branch=local_branch))
return
try:
logging.debug('Refreshing %s from %s branch %s',
git_dir, remote_name, branch)
command = 'pull {remote_name} {branch} --tags'.format(
remote_name=remote_name, branch=branch)
result = self.check_run(git_dir, command)
logging.info('%s:\n%s', repository.name, result)
except ExecutionError:
result = self.check_run(git_dir, 'branch -r')
if result.find(
'{which}/{branch}\n'.format(which=remote_name, branch=branch)) >= 0:
raise
logging.warning(
'WARNING {name} branch={branch} is not known to {which}.\n'
.format(name=repository.name, branch=branch, which=remote_name))
def __check_clone_branch(self, remote_url, base_dir, clone_command, branches):
remaining_branches = list(branches)
while True:
branch = remaining_branches.pop(0)
cmd = '{clone} -b {branch}'.format(clone=clone_command, branch=branch)
retcode, stdout = self.run_git(base_dir, cmd)
if not retcode:
return
not_found = stdout.find('Remote branch {branch} not found'
.format(branch=branch)) >= 0
if not not_found:
full_command = 'git -C "{dir}" {cmd}'.format(dir=base_dir, cmd=cmd)
raise_and_log_error(ExecutionError(full_command, program='git'),
full_command + ' failed with:\n' + stdout)
if remaining_branches:
logging.warning(
'Branch %s does not exist in %s. Retry with %s',
branch, remote_url, remaining_branches[0])
continue
lines = stdout.split('\n')
stdout = '\n '.join(lines)
logging.error('git -C "%s" %s failed with output:\n %s',
base_dir, cmd, stdout)
raise_and_log_error(ConfigError('Branches {0} do not exist in {1}.'
.format(branches, remote_url)))
def remove_all_non_version_tags(self, repository, git_dir=None):
"""Removes tags from the repository that confuse nebula.
This confusion is because nebula is assuming Netflix policies and tags,
but the OSS build has different policies and different tags to avoid
conflicts with Netflix internal usage.
"""
tag_matcher = re.compile(r'^version-[0-9]+\.[0-9]+\.[0-9]+$')
git_dir = git_dir or repository.git_dir
logging.debug('Clearing all non-version tags from %s', git_dir)
all_tags = self.check_run(git_dir, 'tag').split('\n')
tags_to_remove = [tag for tag in all_tags if not tag_matcher.match(tag)]
self.check_run(git_dir, 'tag -d ' + ' '.join(tags_to_remove))
logging.debug('%d of %d tags removed', len(tags_to_remove), len(all_tags))
def clone_repository_to_path(
self, repository, commit=None, branch=None, default_branch=None):
"""Clone the remote repository at the given commit or branch.
If requesting a branch and it is not found, then settle for the default
branch, if one was explicitly specified.
"""
# pylint: disable=too-many-arguments
if (commit is not None) and (branch is not None):
raise_and_log_error(
ConfigError('At most one of commit or branch can be specified.'))
origin = repository.origin
parts = self.normalize_repo_url(repository.origin)
if len(parts) == 3:
pull_url = (self.make_ssh_url(*parts) if self.__options.github_pull_ssh
else self.make_https_url(*parts))
else:
pull_url = origin
git_dir = repository.git_dir
logging.debug('Begin cloning %s', pull_url)
parent_dir = os.path.dirname(git_dir)
ensure_dir_exists(parent_dir)
clone_command = 'clone ' + pull_url
if branch:
branches = [branch]
if default_branch:
branches.append(default_branch)
self.__check_clone_branch(pull_url, parent_dir, clone_command, branches)
else:
self.check_run(parent_dir, clone_command)
logging.info('Cloned %s into %s', pull_url, parent_dir)
if commit:
self.check_run(git_dir, 'checkout -q ' + commit, echo=True)
upstream = repository.upstream_or_none()
if upstream and not self.is_same_repo(upstream, origin):
logging.debug('Adding upstream %s with disabled push', upstream)
self.check_run(git_dir, 'remote add upstream ' + upstream)
which = ('upstream'
if upstream and not self.is_same_repo(upstream, origin)
else 'origin')
if self.__options.github_disable_upstream_push:
self.check_run(
git_dir, 'remote set-url --push {which} disabled'.format(which=which))
if which != 'origin' or not self.__options.github_disable_upstream_push:
parts = self.normalize_repo_url(repository.origin)
if len(parts) == 3:
# Origin is not a local path
logging.debug('Fixing origin push url')
push_url = (self.make_ssh_url(*parts) if self.__options.github_push_ssh
else self.make_https_url(*parts))
self.check_run(git_dir, 'remote set-url --push origin ' + push_url)
logging.debug('Finished cloning %s', pull_url)
def tag_head(self, git_dir, tag):
"""Add tag to the local repository HEAD."""
self.check_run(git_dir, 'tag {tag} HEAD'.format(tag=tag))
def query_tag_commits(self, git_dir, tag_pattern):
"""Collect the TagCommit for each tag matching the pattern.
Returns: list of CommitTag sorted most recent first.
"""
retcode, stdout = self.run_git(git_dir, 'show-ref --tags')
if retcode and stdout:
raise_and_log_error(
ExecutionError('git failed in %s' % git_dir, program='git'),
'git -C "%s" show-ref --tags: %s' % (git_dir, stdout))
ref_lines = stdout.split('\n')
commit_tags = [CommitTag.make(line) for line in ref_lines if line]
matcher = re.compile(tag_pattern)
filtered = [ct for ct in commit_tags if matcher.match(ct.tag)]
return sorted(filtered, reverse=True)
def determine_git_repository_spec(self, git_dir):
"""Infer GitRepositorySpec from a local git repository."""
git_text = self.check_run(git_dir, 'remote -v')
remote_urls = {
match.group(1): match.group(2)
for match in re.finditer(r'(\w+)\s+(\S+)\s+\(fetch\)', git_text)
}
origin_url = remote_urls.get('origin')
if not origin_url:
raise_and_log_error(
UnexpectedError('{0} has no remote "origin"'.format(git_dir)))
return GitRepositorySpec(os.path.basename(git_dir),
git_dir=git_dir,
origin=origin_url,
upstream=remote_urls.get('upstream'))
def collect_repository_summary(self, git_dir, base_commit_id=None):
"""Collects RepsitorySummary from local repository directory."""
start_time = time.time()
logging.debug('Begin analyzing %s', git_dir)
all_tags = self.query_tag_commits(
git_dir, r'^version-[0-9]+\.[0-9]+\.[0-9]+$')
current_id = self.query_local_repository_commit_id(git_dir)
tag, msgs = self.query_local_repository_commits_to_existing_tag_from_id(
git_dir, current_id, all_tags, base_commit_id=base_commit_id)
if not tag:
current_semver = SemanticVersion.make('version-0.0.0')
else:
current_semver = SemanticVersion.make(tag)
next_semver = None
if msgs:
semver_significance = CommitMessage.determine_semver_implication_on_list(
msgs)
next_semver = current_semver.next(semver_significance)
use_tag = next_semver.to_tag()
use_version = next_semver.to_version()
else:
use_tag = tag
use_version = current_semver.to_version()
total_ms = int((time.time() - start_time) * 1000)
logging.debug('Finished analyzing %s in %d ms', git_dir, total_ms)
return RepositorySummary(current_id, use_tag, use_version,
current_semver.to_version(),
msgs)
def delete_local_branch_if_exists(self, git_dir, branch):
"""Delete the branch from git_dir if one exists.
This will fail if the branch exists and the git_dir is currently in it.
"""
result = self.check_run(git_dir, 'branch -l')
branches = []
for elem in result.split('\n'):
if elem.startswith('*'):
elem = elem[1:].strip()
branches.append(elem)
if branch in branches:
logging.info('Deleting existing branch %s from %s', branch, git_dir)
self.check_run(git_dir, 'branch -D ' + branch)
return
def initiate_github_pull_request(
self, git_dir, message, base='master', head=None):
"""Initialize a pull request for the given commit on the given branch.
Args:
git_dir: [path] The local repository to initiate the pull request with.
message: [string] The pull request message. If this is multiple lines
then the first line will be the title, subsequent lines will
be the PR description.
base: [string] The base reference for the pull request.
The default is master, but this could be a BRANCH or OWNER:BRANCH
head: [string] The branch to use for the pull request. By default this
is the current branch state of the git_dir repository. This
too can be BRANCH or OWNER:BRANCH. This branch must have already been
pushed to the origin repository -- not the local repository.
"""
options = self.options
message = message.strip()
if options.pr_notify_list:
message += '\n\n@' + ', @'.join(options.pr_notify_list.split(','))
hub_args = []
if base:
hub_args.extend(['-b', base])
if head:
hub_args.extend(['-h', head])
if options.git_never_push:
logging.warning(
'SKIP creating pull request because --git_never_push=true.'
'\nCommand would have been: %s',
'git -C "{dir}" pull-request {args} -m {msg!r}'.format(
dir=git_dir, args=' '.join(hub_args), msg=message))
return
message_path = None
if message.find('\n') < 0:
hub_args.extend(['-m', message])
else:
fd, message_path = tempfile.mkstemp(prefix='hubmsg')
os.write(fd, message)
os.close(fd)
hub_args.extend(['-F', message_path])
logging.info(
'Initiating pull request in %s from %s to %s with message:\n%s',
git_dir, base, head if head else '<current branch>', message)
try:
kwargs = {}
self.__inject_auth(kwargs)
output = check_subprocess(
'hub -C "{dir}" pull-request {args}'.format(
dir=git_dir, args=' '.join(hub_args)),
**kwargs)
logging.info(output)
finally:
if message_path:
os.remove(message_path)
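# Illustrative sketch (not part of the original source): how the tag and commit
# helpers above might be combined for one local clone. `git_support` stands in
# for an instance of the class defined above and `repo_dir` for a local
# repository containing `version-X.Y.Z` tags; both names are hypothetical.
def _example_summarize_repository(git_support, repo_dir):
# Current HEAD and the version tags reachable from the repository.
head_id = git_support.query_local_repository_commit_id(repo_dir)
version_tags = git_support.query_tag_commits(
repo_dir, r'^version-[0-9]+\.[0-9]+\.[0-9]+$')
# Commit messages since the newest applicable tag drive the next version.
tag, messages = git_support.query_local_repository_commits_to_existing_tag_from_id(
repo_dir, head_id, version_tags)
return tag, messages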
|
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from collections import namedtuple
from tensorflow.python.platform import gfile
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.client import session
from tensorflow.python.ops import array_ops
from intel_quantization.quantize_graph.quantize_graph_common import QuantizeGraphHelper as helper
import logging
import tensorflow as tf
import numpy as np
import os
class QuantizeGraphBase(object):
"""
This is the base class for quantize graph.
"""
def __init__(self, output_node_names):
self.output_node_names = output_node_names
self.transformers = OrderedDict()
def register_transformer(self, node_name, entry):
if node_name not in self.transformers:
self.transformers[node_name] = []
self.transformers[node_name].append(entry)
def do_transform(self):
"""
This is the virtual interface that needs to be implemented by derived classes.
:return:
"""
pass
def remove_dead_nodes(self, input_graph, output_names):
"""Removes nodes that are no longer needed for inference from the graph."""
return graph_util.extract_sub_graph(input_graph, output_names)
def get_supported_fusion_node(self):
return self.transformers.keys()
class QuantizeNodeBase(object):
"""This is the base class for nodes fusion
Arguments:
object {[type]} -- [description]
"""
node_details = namedtuple('node_details', ['node', 'input_node', 'output'])
def __init__(self,
input_graph,
output_node_names,
per_channel,
start_node_name,
enable_s8=True):
if isinstance(input_graph, graph_pb2.GraphDef):
self.input_graph = input_graph
else:
self.input_graph = graph_pb2.GraphDef()
with gfile.Open(input_graph, 'rb') as f:
self.input_graph.ParseFromString(f.read())
self._parse_graph()
self.output_node_names = output_node_names
self.output_node_maps = {}
self.output_graph = graph_pb2.GraphDef()
self.quantized_node_dict = {}
self.intel_cpu_eightbitize = True
self.per_channel = per_channel
self.start_node_name = start_node_name
self.enable_s8 = False if tf.__version__ < '2.1.0' else enable_s8
def apply_the_transform(self):
"""
This is the virtual interface to be implemented by derived classes.
:return:
"""
pass
def get_longest_fuse(self):
pass
def _is_match(self, patterns):
"""Detect the rule matched nodes collections.
Returns:
[List] -- [the matched rule]
[String] -- [the list contains the matched node name]
"""
matched_node_name = []
for k, v in enumerate(self.op_list):
if v in set(fusion[0] for fusion in patterns):
cur_node = self.node_name_mapping[list(
self.node_name_mapping.keys())[k]].node
if cur_node.name != self.start_node_name:
continue
if (v in ("MatMul") or (
v in ("Conv2D", "DepthwiseConv2dNative") and not self.enable_s8)) and not self._find_relu_node(
cur_node):
continue
for sub_rule in patterns:
if v != sub_rule[0]:
continue
sub_rule_len = len(sub_rule)
logging.debug("Try to apply rule: {}".format(sub_rule))
cur_node_name = list(self.node_name_mapping.keys())[k]
matched_node_name.append(cur_node_name)
while sub_rule_len > 1:
if not self.node_name_mapping[cur_node_name].output:
logging.debug("Failed to match {}".format(sub_rule))
break
next_node_name = self.node_name_mapping[
cur_node_name].output[0]
next_node_op = self.node_name_mapping[
next_node_name].node.op
is_shared_output = True if len(
self.node_name_mapping[cur_node_name].output
) > 1 else False
if not is_shared_output and next_node_op == sub_rule[
1 - sub_rule_len]:
matched_node_name.append(next_node_name)
sub_rule_len -= 1
cur_node_name = next_node_name
else:
matched_node_name.clear()
logging.debug("Failed to match {}".format(sub_rule))
break
if sub_rule_len == 1:
logging.debug("match {} on nodes {} ".format(
sub_rule, matched_node_name))
return sub_rule, matched_node_name
return None, None
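# Note (added for clarity, not in the original source): with a pattern list
# such as [["Conv2D", "BiasAdd", "Relu"]], _is_match starts at
# self.start_node_name and walks the single-consumer output chain, returning
# the matched sub rule together with the list of chained node names, or
# (None, None) when no rule applies.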
def _need_to_check(self, node_type):
op_list = ("ConcatV2", "Conv2D", "DepthwiseConv2D", "QuantizeV2",
"DepthwiseConv2dNative", "MaxPool", "Requantize", "AvgPool",
"Pad", "CropAndResize", "Dequantize", "Mean", "MatMul", "Transpose", "Reshape")
return any([node_type.find(i) != -1 for i in op_list])
def _find_relu_node(self, node):
if node.op in ("Relu", "Relu6") or node.op.find("AndRelu") != -1:
return True
elif (node.op.find("QuantizedConv") != -1 or
node.op.find("QuantizedDepthwiseConv") != -1
) and node.op.find("Relu") == -1:
return False
elif self._need_to_check(node.op):
input_node = self.node_name_mapping[helper.node_name_from_input(
node.input[0])]
return self._find_relu_node(input_node.node)
else:
return False
def _add_output_node(self, node_name, node):
if node_name not in self.output_node_maps:
self.output_node_maps[node_name] = node
else:
raise ValueError("Duplicate Node Found {} {} {}".format(
node_name, node.op, self.output_node_maps[node_name].op))
def _reset_output_node_maps(self):
self.output_node_maps = {}
def write_graph(self, out_graph_def, out_graph_file):
"""Write output graphDef to file.
:param out_graph_def: output graphDef.
:param out_graph_file: path to output graph file.
:return: None.
"""
if not isinstance(out_graph_def, tf.compat.v1.GraphDef):
raise ValueError(
'out_graph_def is not instance of TensorFlow GraphDef.')
if out_graph_file and not os.path.exists(
os.path.dirname(out_graph_file)):
raise ValueError('"output_graph" directory does not exists.')
f = gfile.GFile(out_graph_file, 'wb')
f.write(out_graph_def.SerializeToString())
def _get_op_list(self):
self.op_list = []
for _, v in enumerate(self.node_name_mapping):
self.op_list.append(self.node_name_mapping[v].node.op)
def _get_node_input(self, node_name):
"""
Return control_input name, non-control_input node name
"""
return [
i for i in self.node_name_mapping[node_name].node.input
if i[0] == '^'
], [
i for i in self.node_name_mapping[node_name].node.input
if i[0] != '^'
]
def _intel_cpu_add_dequantize_result_node(self,
quantized_output_name,
original_node_name,
dtype=dtypes.quint8,
min_tensor_index=1):
min_max_inputs = [
"%s:%s" % (quantized_output_name, min_tensor_index),
"%s:%s" % (quantized_output_name, min_tensor_index + 1)
]
dequantize_name = original_node_name
dequantize_node = helper.create_node(
"Dequantize", dequantize_name,
[quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
helper.set_attr_dtype(dequantize_node, "T", dtype)
helper.set_attr_string(dequantize_node, "mode",
b"SCALED" if self.per_channel else b"MIN_FIRST")
self.add_output_graph_node(dequantize_node)
def eightbitize_single_input_tensor_node(self, original_node,
add_op_function):
quantized_op_name = original_node.name + "_eightbit_quantized"
quantized_op_type = "Quantized" + original_node.op
all_input_names = self._add_eightbit_prologue_nodes(original_node.name)
quantized_op_node = helper.create_node(quantized_op_type,
quantized_op_name,
all_input_names)
add_op_function(original_node, quantized_op_node)
self.add_output_graph_node(quantized_op_node)
self._intel_cpu_add_dequantize_result_node(quantized_op_name,
original_node.name)
def _add_eightbit_prologue_nodes(self, original_node):
namespace_prefix = original_node + "_eightbit"
reshape_dims_name, reduction_dims_name = self._add_common_quantization_nodes(
namespace_prefix,
self.node_name_mapping[original_node].node.input[0])
input_names = []
min_max_names = []
for each_input_name in self.node_name_mapping[original_node].node.input:
if each_input_name[0] == '^':
continue
input_node_name = helper.node_name_from_input(each_input_name)
if self.intel_cpu_eightbitize and input_node_name in self.output_node_maps:
dtype = dtypes.DType(
self.output_node_maps[input_node_name].attr["T"].type
) if self.output_node_maps[
input_node_name].op == "Dequantize" else dtypes.quint8
else:
dtype = dtypes.quint8 if self._find_relu_node(
self.node_name_mapping[original_node].node
) else dtypes.qint8
quantize_input_name, min_input_name, max_input_name = (
self._eightbitize_input_to_node(namespace_prefix,
each_input_name,
reshape_dims_name,
reduction_dims_name,
dtype=dtype))
input_names.append(quantize_input_name)
min_max_names.append(min_input_name)
min_max_names.append(max_input_name)
all_input_names = []
all_input_names.extend(input_names)
all_input_names.extend(min_max_names)
for original_input_name in self.node_name_mapping[
original_node].node.input:
if original_input_name[0] == '^':
all_input_names.append(original_input_name)
return all_input_names
def _add_common_quantization_nodes(self,
namespace_prefix,
control_input_names=None):
"""Builds constant nodes needed for quantization of inputs."""
reshape_dims_name = namespace_prefix + "_reshape_dims"
reduction_dims_name = namespace_prefix + "_reduction_dims"
reshape_dims_node = helper.create_constant_node(reshape_dims_name, -1,
dtypes.int32, [1])
if control_input_names:
reshape_dims_node.input.append("^" + control_input_names)
self.add_output_graph_node(reshape_dims_node)
reduction_dims_node = helper.create_constant_node(
reduction_dims_name, 0, dtypes.int32, [1])
if control_input_names:
reduction_dims_node.input.append("^" + control_input_names)
self.add_output_graph_node(reduction_dims_node)
return reshape_dims_name, reduction_dims_name
def add_output_graph_node(self, output_node):
"""Inserts one node into the new graph."""
self.output_graph.node.extend([output_node])
self._add_output_node(output_node.name, output_node)
def _parse_graph(self, input_graph=None):
"""
Parse the graph and get the input node and output node name details.
"""
logging.debug("start parsing graph")
self.node_name_mapping = OrderedDict()
graph = self.input_graph if input_graph is None else input_graph
for node in graph.node:
each_node = self.node_details(node=node, input_node=[], output=[])
if node.name in self.node_name_mapping:
raise ValueError(
"Duplicate Node Found when _parse_graph, the node name is {}"
.format(node.name))
self.node_name_mapping[node.name] = each_node
for node in graph.node:
for input in node.input:
self.node_name_mapping[helper.node_name_from_input(
input)].output.append(node.name)
def remove_redundant_quantization(self, old_graph):
old_nodes_map = self.create_nodes_map(old_graph)
self.output_graph = graph_pb2.GraphDef()
inputs_to_rename = {}
# We go through all the nodes, looking for any that match the patterns we
# know how to optimize away.
for node in old_graph.node:
# We always start with a Quantize node, and examine its inputs to see if
# they are in a form that can be removed.
if node.op not in ["Quantize", "QuantizeV2"]:
continue
dequantize_node_name = helper.node_name_from_input(node.input[0])
if dequantize_node_name not in old_nodes_map:
raise ValueError("Input node name '" + dequantize_node_name +
"' not found in node '" + node.name + "'")
dequantize_node = old_nodes_map[dequantize_node_name]
# Do we have a Dequantize feeding in, with the same type as the Quantize?
if dequantize_node.op != "Dequantize":
continue
if node.attr["T"] != dequantize_node.attr["T"]:
continue
# Now look at the other inputs, and ensure they're Min/Max nodes.
min_node_name = helper.node_name_from_input(node.input[1])
max_node_name = helper.node_name_from_input(node.input[2])
min_node = old_nodes_map[min_node_name]
max_node = old_nodes_map[max_node_name]
is_min_right_type = (min_node.op in ["Min", "Dequantize"])
is_max_right_type = (max_node.op in ["Max", "Dequantize"])
if not is_min_right_type or not is_max_right_type:
print("Didn't find expected types on inputs : %s, %s." %
(min_node.op, max_node.op))
continue
min_node_input_name = helper.node_name_from_input(min_node.input[0])
max_node_input_name = helper.node_name_from_input(max_node.input[0])
# There are two different patterns for Min nodes we can recognize, one
# where the input comes directly from the same one as the Max, and
# another where we run it through another Min first, so check for both.
is_same_input = False
if min_node_input_name == max_node_input_name:
is_same_input = True
else:
first_min_node_input = old_nodes_map[min_node_input_name]
if first_min_node_input.op == "Concat":
second_min_node_name = helper.node_name_from_input(
first_min_node_input.input[1])
second_min_node = old_nodes_map[second_min_node_name]
if second_min_node.op == "Min":
second_min_node_input_name = helper.node_name_from_input(
second_min_node.input[0])
is_same_input = (
second_min_node_input_name == max_node_input_name)
if not is_same_input:
print("Different min/max inputs: " + min_node_input_name)
continue
# We recognize this pattern, so mark the graph edges to be rewired to
# route around it entirely, since we know it's a no-op.
dequantize_source_name = helper.node_name_from_input(
dequantize_node.input[0])
node_tensor_name = helper.ensure_tensor_name_has_port(node.name)
min_tensor_name = node.name + ":1"
max_tensor_name = node.name + ":2"
inputs_to_rename[node_tensor_name] = dequantize_source_name
inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
# Finally we apply all the rewiring we've marked to the graph.
for node in old_graph.node:
for index, input_full_name in enumerate(node.input):
input_name = helper.ensure_tensor_name_has_port(input_full_name)
if input_name in inputs_to_rename:
node.input[index] = inputs_to_rename[input_name]
self.add_output_graph_node(node)
return self.output_graph
def apply_final_node_renames(self):
"""Applies node renames in self.final_node_renames to self.output_graph."""
old_graph = self.output_graph
self.output_graph = graph_pb2.GraphDef()
for node in old_graph.node:
node.name = self.final_node_renames.get(node.name, node.name)
for index, input_name in enumerate(node.input):
node_name = helper.node_name_from_input(input_name)
input_full_name = helper.ensure_tensor_name_has_port(input_name)
if node_name in self.final_node_renames:
node.input[index] = "%s%s" % (
self.final_node_renames[node_name],
input_full_name[len(node_name):])
self.add_output_graph_node(node)
return self.output_graph
def create_nodes_map(self, graph):
"""Builds a mapping of node names to their defs from the graph."""
nodes_map = {}
for node in graph.node:
if node.name not in nodes_map.keys():
nodes_map[node.name] = node
else:
raise ValueError("Duplicate node names detected.")
return nodes_map
def _add_quantize_down_nodes(self,
original_node,
quantized_output_name,
requantize_type=dtypes.quint8,
is_relu6=False):
quantized_outputs = [
quantized_output_name, quantized_output_name + ":1",
quantized_output_name + ":2"
]
# Add a RequantizationRange node for finding the min and max values.
requant_range_node = helper.create_node(
"RequantizationRangePerChannel"
if self.per_channel else "RequantizationRange",
original_node.name + "_eightbit_requant_range", quantized_outputs)
if self.per_channel:
helper.set_attr_dtype(requant_range_node, "T", dtypes.qint32)
if is_relu6:
helper.set_attr_float(requant_range_node, "clip_value_max", 6.0)
else:
helper.set_attr_float(requant_range_node, "clip_value_max",
1e30)
else:
helper.set_attr_dtype(requant_range_node, "Tinput", dtypes.qint32)
self.add_output_graph_node(requant_range_node)
min_max_inputs = [
requant_range_node.name + ":0", requant_range_node.name + ":1"
]
requantize_node = helper.create_node(
"RequantizePerChannel" if self.per_channel else "Requantize",
original_node.name + "_eightbit_requantize",
quantized_outputs + min_max_inputs)
if self.per_channel:
helper.set_attr_dtype(requantize_node, "T", dtypes.qint32)
else:
helper.set_attr_dtype(requantize_node, "Tinput", dtypes.qint32)
helper.set_attr_dtype(requantize_node, "out_type", requantize_type)
self.add_output_graph_node(requantize_node)
return requantize_node.name
def add_dequantize_result_node(self,
quantized_output_name,
original_node_name,
min_tensor_index=1):
min_max_inputs = [
"%s:%s" % (quantized_output_name, min_tensor_index),
"%s:%s" % (quantized_output_name, (min_tensor_index + 1))
]
dequantize_name = original_node_name
dequantize_node = helper.create_node(
"Dequantize", dequantize_name,
[quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
helper.set_attr_dtype(dequantize_node, "T", dtypes.quint8)
helper.set_attr_string(
dequantize_node, "mode",
b"SCALED" if self.intel_cpu_eightbitize else b"MIN_FIRST")
self.add_output_graph_node(dequantize_node)
def _eightbitize_input_to_node(self,
namespace_prefix,
original_input_name,
reshape_dims_name,
reduction_dims_name,
dtype=dtypes.quint8):
"""Takes one float input to an op, and converts it to quantized form."""
unique_input_name = helper.unique_node_name_from_input(
original_input_name)
if unique_input_name in self.quantized_node_dict:
quantized_tuple = self.quantized_node_dict[unique_input_name]
return quantized_tuple[0], quantized_tuple[1], quantized_tuple[2]
reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
min_input_name = namespace_prefix + "_min_" + unique_input_name
max_input_name = namespace_prefix + "_max_" + unique_input_name
quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
reshape_input_node = helper.create_node(
"Reshape", reshape_input_name,
[original_input_name, reshape_dims_name])
helper.set_attr_dtype(reshape_input_node, "T", dtypes.float32)
self.add_output_graph_node(reshape_input_node)
min_input_node = helper.create_node(
"Min", min_input_name, [reshape_input_name, reduction_dims_name])
helper.set_attr_dtype(min_input_node, "T", dtypes.float32)
helper.set_attr_dtype(min_input_node, "Tidx", dtypes.int32)
helper.set_attr_bool(min_input_node, "keep_dims", False)
self.add_output_graph_node(min_input_node)
max_input_node = helper.create_node(
"Max", max_input_name, [reshape_input_name, reduction_dims_name])
helper.set_attr_dtype(max_input_node, "T", dtypes.float32)
helper.set_attr_dtype(max_input_node, "Tidx", dtypes.int32)
helper.set_attr_bool(max_input_node, "keep_dims", False)
self.add_output_graph_node(max_input_node)
quantize_input_node = helper.create_node(
"QuantizeV2", quantize_input_name,
[original_input_name, min_input_name, max_input_name])
helper.set_attr_dtype(quantize_input_node, "T", dtype)
helper.set_attr_string(quantize_input_node, "mode", b"SCALED")
helper.set_attr_string(quantize_input_node, "round_mode",
b"HALF_TO_EVEN")
# if FLAGS.model_name in ["wide_deep_large_ds"]:
# set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
# else:
# set_attr_string(quantize_input_node, "mode",
# b"SCALED" if self.intel_cpu_eightbitize else b"MIN_FIRST")
# set_attr_string(quantize_input_node, "round_mode",
# b"HALF_TO_EVEN" if self.intel_cpu_eightbitize
# else b"HALF_AWAY_FROM_ZERO")
self.add_output_graph_node(quantize_input_node)
min_output_name = quantize_input_name + ":1"
max_output_name = quantize_input_name + ":2"
self.quantized_node_dict[unique_input_name] = (quantize_input_name,
min_output_name,
max_output_name)
return quantize_input_name, min_output_name, max_output_name
def _intel_cpu_quantize_weight_eightbit(self,
parent,
input_node,
per_channel,
quantization_mode=b"SCALED"):
base_name = input_node.name + "_"
qint8_const_name = base_name + "qint8_const"
min_name = base_name + "min"
max_name = base_name + "max"
float_tensor = tensor_util.MakeNdarray(input_node.attr["value"].tensor)
epsilon = 1e-4 # Needs to be set empirically if accuracy is not satisfactory
if parent in ("Conv2D", "MatMul"):
if per_channel:
ranges = np.abs(float_tensor).max(axis=(0, 1, 2))
min_value = -ranges
max_value = ranges
# nudging min-max values outside epsilon radius around zero
ranges[ranges < epsilon] = epsilon
min_value[np.abs(min_value) < epsilon] = -epsilon
max_value[np.abs(max_value) < epsilon] = epsilon
qint8_tensor = (float_tensor * 127.0 / ranges).astype(np.int8)
else:
min_value = np.min(float_tensor.flatten())
max_value = np.max(float_tensor.flatten())
# Same processing of min-max as in quantize_weight_eightbit
# function.
if min_value > 0.0:
min_value = 0.0
if min_value == max_value:
if abs(min_value) < 0.000001:
max_value = min_value + 1.0
elif min_value > 0:
max_value = 2 * min_value
else:
max_value = min_value / 2.0
sess = session.Session()
with sess.as_default():
quantize_op = array_ops.quantize_v2(
float_tensor,
min_value,
max_value,
dtypes.qint8,
mode=quantization_mode,
round_mode="HALF_TO_EVEN")
qint8_tensor = quantize_op[0].eval()
# Updated min-max values should be passed to the next feeding node.
min_value = quantize_op[1].eval()
max_value = quantize_op[2].eval()
elif parent == "DepthwiseConv2dNative":
# get the max values based on dim 0 and 1 for depthwise conv
# since, the output channel will be dim 2 * dim 3
ranges = np.abs(float_tensor).max(axis=(0, 1))
ranges = ranges.flatten()
min_value = -ranges
max_value = ranges
# nudging min-max values outside epsilon radius around zero
ranges[ranges < epsilon] = epsilon
min_value[np.abs(min_value) < epsilon] = -epsilon
max_value[np.abs(max_value) < epsilon] = epsilon
# Since output channel will be 1 dim which is dim 2 * dim 3
# When divide by range, qint8_tensor needs to be 3 dim
# where, 3rd dim should be same dim of ranges
a, b, c, d = float_tensor.shape
qint8_tensor = (float_tensor.reshape(a, b, c * d) * 127.0 /
ranges).astype(np.int8)
# get the shape back to 4 dim
qint8_tensor = qint8_tensor.reshape(a, b, c, d)
shape = tensor_util.TensorShapeProtoToList(
input_node.attr["value"].tensor.tensor_shape)
qint8_const_node = helper.create_constant_node(qint8_const_name,
qint8_tensor,
dtypes.qint8,
shape=shape)
min_node = helper.create_constant_node(min_name, min_value,
dtypes.float32)
max_node = helper.create_constant_node(max_name, max_value,
dtypes.float32)
dequantize_node = helper.create_node(
"Dequantize", input_node.name,
[qint8_const_name, min_name, max_name])
helper.set_attr_dtype(dequantize_node, "T", dtypes.qint8)
helper.set_attr_string(dequantize_node, "mode", b"SCALED")
self.add_output_graph_node(qint8_const_node)
self.add_output_graph_node(min_node)
self.add_output_graph_node(max_node)
self.add_output_graph_node(dequantize_node)
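# Illustrative sketch (not part of the original module): the per-channel weight
# scaling performed above for Conv2D/MatMul weights, expressed directly in
# numpy. Each output channel (last axis) is mapped into int8 with a scale of
# 127 / max(|w|) for that channel, mirroring _intel_cpu_quantize_weight_eightbit.
def _example_per_channel_int8(float_tensor, epsilon=1e-4):
ranges = np.abs(float_tensor).max(axis=(0, 1, 2))
ranges[ranges < epsilon] = epsilon
return (float_tensor * 127.0 / ranges).astype(np.int8)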
|
|
"""
Nodes for representing the parse tree.
The return type for `.rule.Rule.parse`.
"""
from __future__ import unicode_literals
from pygments.token import Token
__all__ = ('AnyNode', 'LiteralNode', 'RepeatNode', 'SequenceNode', 'VariableNode')
class ParseNode(object):
def __init__(self, rule):
self.rule = rule
@property
def is_complete(self):
"""
Boolean, indicating that we have a grammatical match; all the
variables/literals are filled in.
"""
return True
@property
def is_extendible(self):
"""
Boolean, indicating whether this node could consume any more tokens.
In case of repeats for instance, a node can keep consuming tokens.
"""
return not self.is_complete
def complete(self, text=''):
"""
Given the beginning text of the *following* token, yield a list of
`Completion` instances.
"""
if False:
yield
def get_help_tokens(self):
"""
Only yield a help part if this node didn't contain any text yet.
"""
if not self._text:
for t in self.rule.get_help_tokens():
yield t
def get_variables(self):
"""
Get a dictionary that contains all the variables (`dest` in the grammar
tree definition.).
"""
if self.rule.dest:
return {self.rule.dest: True}
else:
return {}
class EmptyNode(ParseNode):
"""
When the input stream is empty, but input tokens are required, this node is
a placeholder for expected input.
"""
def __repr__(self):
return 'EmptyNode(%r)' % self.rule
# This node is obviously not complete, as we lack an input token.
is_complete = False
def complete(self, text=''):
for c in self.rule.complete(text):
yield c
def get_help_tokens(self):
for k in self.rule.get_help_tokens():
yield k
class SequenceNode(ParseNode):
""" Parse tree result of sequence """
def __init__(self, rule, children):
super(SequenceNode, self).__init__(rule)
self.children = children
def __repr__(self):
return 'SequenceNode(%r)' % self.children
@property
def is_complete(self):
return len(self.children) == len(self.rule.rules) and all(d.is_complete for d in self.children)
@property
def is_extendible(self):
""" This node can be extended as long as it's incomplete, or the last
child is extendible (repeatable). """
return not self.is_complete or (self.children and self.children[-1].is_extendible)
def complete(self, text=''):
# When the last child node is unfinished, complete that.
# (e.g. nested Sequence, only containing a few tokens.)
if self.children and not self.children[-1].is_complete:
for completion in self.children[-1].complete(text):
yield completion
# Every child in this sequence is 'complete.'
else:
# Complete using the first following rule.
if len(self.children) < len(self.rule.rules):
for completion in self.rule.rules[len(self.children)].complete(text):
yield completion
# If the last child allows repetitions (Nested repeat.)
if self.children and self.children[-1].is_extendible:
for completion in self.children[-1].complete(text):
yield completion
def get_help_tokens(self):
first = True
if self.children and self.children[-1].is_extendible:
for k in self.children[-1].get_help_tokens():
yield k
first = False
for rule in self.rule.rules[len(self.children):]:
if not first:
yield (Token, ' ')
first = False
for k in rule.get_help_tokens():
yield k
def get_variables(self):
result = super(SequenceNode, self).get_variables()
for c in self.children:
result.update(c.get_variables())
return result
class RepeatNode(ParseNode):
def __init__(self, rule, children, tokens_after):
super(RepeatNode, self).__init__(rule)
self.children = children
#: True if there were input tokens following this tree.
self._tokens_after = tokens_after
def __repr__(self):
return 'RepeatNode(%r)' % self.children
@property
def is_complete(self): # TODO: revise the definition of 'is_complete'... (does it mean not showing help info or processable?)
# Note that an empty repeat is also 'complete'
return all(c.is_complete for c in self.children)
@property
def is_extendible(self):
return True
def complete(self, text=''):
if self.children and not self.children[-1].is_complete:
for c in self.children[-1].complete(text):
yield c
else:
for c in self.rule.complete(text):
yield c
def get_help_tokens(self):
# If in the original input, there were tokens following the repeat, then
# we can consider this node complete.
if self._tokens_after:
pass
# If we don't have children yet, take the help of the nested grammar itself.
elif not self.children or self.is_complete:
for t in self.rule.get_help_tokens():
yield t
else:
for k in self.children[-1].get_help_tokens():
yield k
def get_variables(self):
result = super(RepeatNode, self).get_variables()
for c in self.children:
result.update(c.get_variables())
return result
class AnyNode(ParseNode):
def __init__(self, rule, child):
assert isinstance(child, ParseNode)
super(AnyNode, self).__init__(rule)
self.child = child
def __repr__(self):
return 'AnyNode(%r)' % self.child
@property
def is_complete(self):
return self.child.is_complete
@property
def is_extendible(self):
return self.child.is_extendible
def complete(self, text=''):
for completion in self.child.complete(text):
yield completion
def get_help_tokens(self):
for t in self.child.get_help_tokens():
yield t
def get_variables(self):
result = super(AnyNode, self).get_variables()
result.update(self.child.get_variables())
return result
class LiteralNode(ParseNode):
def __init__(self, rule, text):
# #assert isinstance(rule, Literal)
super(LiteralNode, self).__init__(rule)
self._text = text
def __repr__(self):
return 'LiteralNode(%r)' % self._text
def get_variables(self):
if self.rule.dest:
return {self.rule.dest: self._text}
else:
return {}
class VariableNode(ParseNode):
def __init__(self, rule, text):
# #assert isinstance(rule, Variable)
super(VariableNode, self).__init__(rule)
self._text = text
def __repr__(self):
return 'VariableNode(%r)' % self._text
def get_variables(self):
if self.rule.dest:
return {self.rule.dest: self._text}
else:
return {}
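# Illustrative sketch (not part of the original module): how get_variables()
# rolls `dest` values up from the leaves. _FakeRule is a hypothetical stand-in
# exposing only the attributes these nodes read here (`dest` and `rules`).
class _FakeRule(object):
def __init__(self, dest=None, rules=()):
self.dest = dest
self.rules = rules
def _example_get_variables():
host = VariableNode(_FakeRule(dest='host'), 'localhost')
flag = LiteralNode(_FakeRule(dest='verbose'), '-v')
seq = SequenceNode(_FakeRule(rules=(host.rule, flag.rule)), [host, flag])
# Returns {'host': 'localhost', 'verbose': '-v'}
return seq.get_variables()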
|
|
import unittest
import mock
import numpy as np
import chainer
from chainer import cuda
from chainer import optimizer
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
class TestOptimizerUtility(unittest.TestCase):
def setUp(self):
self.x = np.linspace(-1.0, 1.5, num=6).astype(np.float32).reshape(2, 3)
self.a = np.array(2.0)
def test_sqnorm_cpu(self):
# \sum_{n=0}^{5} (-1.0 + 0.5n)^2 = 4.75
self.assertAlmostEqual(optimizer._sum_sqnorm([self.x]), 4.75)
def test_sqnorm_scalar_cpu(self):
self.assertAlmostEqual(optimizer._sum_sqnorm([self.a]), 4)
@attr.gpu
def test_sqnorm_gpu(self):
x = cuda.to_gpu(self.x)
self.assertAlmostEqual(optimizer._sum_sqnorm([x]), 4.75)
@attr.gpu
def test_sqnorm_scalar_gpu(self):
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(optimizer._sum_sqnorm([a]), 4)
@attr.gpu
def test_sqnorm_array(self):
x = cuda.to_gpu(self.x)
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(optimizer._sum_sqnorm(
[self.x, self.a, x, a]), 8.75 * 2)
@attr.multi_gpu(2)
def test_sqnorm_array_multi_gpu(self):
x0 = cuda.to_gpu(self.x, device=0)
x1 = cuda.to_gpu(self.x, device=1)
a0 = cuda.to_gpu(self.a, device=0)
a1 = cuda.to_gpu(self.a, device=1)
self.assertAlmostEqual(optimizer._sum_sqnorm(
[self.x, self.a, x0, a0, x1, a1]), 8.75 * 3)
class TestOptimizerHook(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def test_add_hook(self):
h1 = mock.MagicMock()
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.optimizer)
def test_remove_hook(self):
h1 = mock.MagicMock()
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.remove_hook('h1')
self.optimizer.call_hooks()
self.assertFalse(h1.called)
def test_duplicated_hook(self):
self.optimizer.setup(self.target)
self.optimizer.add_hook(lambda s: None, 'h1')
with self.assertRaises(KeyError):
self.optimizer.add_hook(lambda s: None, 'h1')
def test_invalid_hook(self):
with self.assertRaises(TypeError):
self.optimizer.add_hook(1)
def test_add_hook_before_setup(self):
with self.assertRaises(RuntimeError):
self.optimizer.add_hook(lambda s: None, 'h1')
class SimpleLink(chainer.Link):
def __init__(self, w, g):
super(SimpleLink, self).__init__(param=w.shape)
self.param.data = w
self.param.grad = g
class TestOptimizerWeightDecay(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def check_weight_decay(self):
w = self.target.param.data
g = self.target.param.grad
decay = 0.2
expect = w - g - decay * w
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(optimizer.WeightDecay(decay))
opt.update()
testing.assert_allclose(expect, w)
def test_weight_decay_cpu(self):
self.check_weight_decay()
@attr.gpu
def test_weight_decay_gpu(self):
self.target.to_gpu()
self.check_weight_decay()
class TestOptimizerLasso(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def check_lasso(self):
w = self.target.param.data
g = self.target.param.grad
xp = cuda.get_array_module(w)
decay = 0.2
expect = w - g - decay * xp.sign(w)
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(optimizer.Lasso(decay))
opt.update()
testing.assert_allclose(expect, w)
def test_lasso_cpu(self):
self.check_lasso()
@attr.gpu
def test_lasso_gpu(self):
self.target.to_gpu()
self.check_lasso()
class TestOptimizerGradientNoise(unittest.TestCase):
eta = 0.01
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
self.noise_value = np.random.normal(
loc=0, scale=np.sqrt(self.eta / np.power(1, 0.55)),
size=(2, 3)).astype(np.float32)
def check_gradient_noise(self):
w = self.target.param.data
g = self.target.param.grad
xp = cuda.get_array_module(w)
noise_value = xp.asarray(self.noise_value)
expect = w - g - noise_value
noise = mock.Mock(return_value=noise_value)
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
hook = optimizer.GradientNoise(self.eta, noise_func=noise)
opt.add_hook(hook)
opt.update()
testing.assert_allclose(expect, w, rtol=0.4)
noise.assert_called_once_with(xp, (2, 3), np.float32, hook, opt)
def test_gradient_noise_cpu(self):
self.check_gradient_noise()
@attr.gpu
def test_gradient_noise_gpu(self):
self.target.to_gpu()
self.check_gradient_noise()
class TestGradientHardClipping(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def check_hardclipping(self):
w = self.target.param.data
g = self.target.param.grad
xp = cuda.get_array_module(w)
lower_bound = -0.9
upper_bound = 1.1
expect = w - xp.clip(g, lower_bound, upper_bound)
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(optimizer.GradientHardClipping(lower_bound, upper_bound))
opt.update()
testing.assert_allclose(expect, w)
def test_hardclipping_cpu(self):
self.check_hardclipping()
@attr.gpu
def test_hardclipping_gpu(self):
self.target.to_gpu()
self.check_hardclipping()
class TestGradientMethod(unittest.TestCase):
def _suffix(self, gpu):
if gpu:
return 'gpu'
else:
return 'cpu'
def _get_method(self, prefix, gpu):
return getattr(self.optimizer, prefix + '_' + self._suffix(gpu))
def setUp(self):
opt = chainer.GradientMethod()
opt.init_state_cpu = mock.MagicMock()
opt.init_state_gpu = mock.MagicMock()
opt.update_one_cpu = mock.MagicMock()
opt.update_one_gpu = mock.MagicMock()
self.optimizer = opt
self.target = SimpleLink(
np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32))
def setup_cpu(self):
self.optimizer.setup(self.target)
def setup_gpu(self, dst_id=None):
self.target.to_gpu(dst_id)
self.optimizer.setup(self.target)
def check_init_state(self, gpu):
param = chainer.Variable(np.arange(3))
param.grad = np.arange(3)
if gpu:
param.to_gpu()
state = {}
self.optimizer.init_state(param, state)
self._get_method('init_state', gpu).assert_called_once_with(
param, state)
self.assertEqual(self._get_method('init_state', not gpu).call_count, 0)
def test_init_state_cpu(self):
self.check_init_state(False)
@attr.gpu
def test_init_state_gpu(self):
self.check_init_state(True)
def check_update(self, gpu):
self.assertEqual(self.optimizer.t, 0)
self.optimizer.update()
self.assertEqual(self.optimizer.t, 1)
self._get_method('update_one', gpu).assert_called_once_with(
self.target.param, {})
self.assertEqual(self._get_method('update_one', not gpu).call_count, 0)
self.optimizer.zero_grads()
self.assertTrue((cuda.to_cpu(self.target.param.grad) == 0).all())
def test_update_cpu(self):
self.setup_cpu()
self.check_update(False)
@attr.gpu
def test_update_gpu(self):
self.setup_gpu()
self.check_update(True)
def check_accumulate_grads_from_cpu(self):
self.optimizer.accumulate_grads([np.arange(3)])
grad = self.target.param.grad
self.assertTrue((cuda.to_cpu(grad) == np.arange(3) * 2).all())
@attr.gpu
def check_accumulate_grads_from_gpu(self, src_id):
with cuda.Device(src_id):
self.optimizer.accumulate_grads([cuda.cupy.arange(3)])
grad = self.target.param.grad
self.assertTrue((cuda.to_cpu(grad) == np.arange(3) * 2).all())
def test_accumulate_grads_cpu_to_cpu(self):
self.setup_cpu()
self.check_accumulate_grads_from_cpu()
@attr.gpu
def test_accumulate_grads_cpu_to_gpu(self):
self.setup_gpu()
self.check_accumulate_grads_from_cpu()
@attr.gpu
def test_accumulate_grads_gpu_to_cpu(self):
self.setup_cpu()
self.check_accumulate_grads_from_gpu(cuda.Device().id)
@attr.gpu
def test_accumulate_grads_gpu_to_gpu(self):
device_id = cuda.Device().id
self.setup_gpu(device_id)
self.check_accumulate_grads_from_gpu(device_id)
@attr.multi_gpu(2)
def test_accumulate_grads_multigpu(self):
self.setup_gpu(0)
self.check_accumulate_grads_from_gpu(1)
def check_compute_grads_norm(self):
norm = self.optimizer.compute_grads_norm()
self.assertAlmostEqual(norm, np.sqrt(5))
def test_compute_grads_norm_cpu(self):
self.setup_cpu()
self.check_compute_grads_norm()
@attr.gpu
def test_compute_grads_norm_gpu(self):
self.setup_gpu()
self.check_compute_grads_norm()
def check_weight_decay(self):
self.optimizer.weight_decay(0.1)
g = cuda.to_cpu(self.target.param.grad)
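# param and grad are both [0, 1, 2]; weight_decay(0.1) adds 0.1 * param to
# the gradient, giving [0 + 0.0, 1 + 0.1, 2 + 0.2].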
expect = np.array([0.0, 1.1, 2.2], dtype=np.float32)
testing.assert_allclose(g, expect)
def test_weight_decay_cpu(self):
self.setup_cpu()
self.check_weight_decay()
@attr.gpu
def test_weight_decay_gpu(self):
self.setup_gpu()
self.check_weight_decay()
def check_clip_grads(self):
self.optimizer.clip_grads(1.0)
g = cuda.to_cpu(self.target.param.grad)
sqnorm = g.dot(g)
self.assertAlmostEqual(sqnorm, 1.0, delta=1.0e-5)
def test_clip_grads_cpu(self):
self.setup_cpu()
self.check_clip_grads()
@attr.gpu
def test_clip_grads_gpu(self):
self.setup_gpu()
self.check_clip_grads()
testing.run_module(__name__, __file__)
|
|
# General utilities
from __future__ import print_function, division
from math import *
import inspect
import numpy as np
import sys
import astropy
from astropy.table import Table
# arrays
def extractm(x, M):
"""Return every Mth element of x (array-like)"""
N = np.size(x)
inc = np.arange(N)
ind = np.where(inc%M==0)
return x[ind]
def printcol(*arg, **kwarg):
"""Print vectors in columns
Use: printcol <vec1> <vec2> .. <vecn> (<fout='path to file'>)
Default: fout=sys.stdout"""
# Set output
if kwarg:
f=open(kwarg['fout'],'w')
else:
f=sys.stdout
# Get variable names
frame = inspect.currentframe()
frame2 = inspect.getouterframes(frame)[1]
string = inspect.getframeinfo(frame2[0]).code_context[0].strip()
args = string[string.find('(') + 1:-1].split(',')
names = []
for i in args:
if i.find('=') != -1:
names.append(i.split('=')[1].strip())
else:
names.append(i)
Ncol=len(arg)
Nrow=np.zeros(Ncol)
for i in range(Ncol):
Nrow[i]=len(arg[i])
Nmax=int(np.max(Nrow))
# Print
print>>f,("#"),
for i in range(len(names)):
print>>f,("%12s\t"%names[i]),
print>>f,("\n#\n"),
for i in range(Nmax):
for j in range(Ncol):
if i<Nrow[j]:
print>>f,('%12g\t'%arg[j][i]),
else:
print>>f,('\t'),
print>>f,('\n'),
if kwarg:
f.close()
def wherein(x, y):
"""Returns indices of x which correspond to elements of y"""
xsorted = np.argsort(x)
ypos = np.searchsorted(x[xsorted], y)
indices = xsorted[ypos]
return indices
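# Example (added for illustration), assuming every element of y appears in x:
# wherein(np.array([10, 20, 30, 40]), np.array([30, 10])) returns array([2, 0]).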
# manipulate astropy tables
def extract_column(t, names):
"""Return a list of columns from a table
Parameters:
t - table
names - column names to extract
Returns:
list of columns"""
lst=[]
for i in names:
lst.append(np.array(t[i]))
return lst
def add_npcolumn(t, vec=np.empty(0), name="", dtype='float', index=None):
"""Add numpy array as a table column
Parameters:
t - astropy table
vec - array to be added to the table; if empty, a zero-filled array matching the table length is created (default: empty array)
name - column name
dtype - column type (default: float)
index - order index for the array in the output table (default: None)
Returns:
vec - array added to the table"""
if index is None:
index = len(t.columns)
if np.size(vec)==0:
vec = np.zeros(len(t))
tvec = astropy.table.Column(vec, name=name, dtype=dtype)
t.add_column(tvec, index=index)
return vec
# binning
def bincen(bined):
"""Returns bin centers from an input array of bin edge"""
N=np.size(bined)-1
binc=np.zeros(N)
for i in range(N):
binc[i]=(bined[i]+bined[i+1])/2.
return binc
def in2val(index, delta, initial):
return initial+index*delta
def val2in(value, delta, initial):
return np.int64((value-initial)/delta)
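# Example (added for illustration): with initial=0.0 and delta=0.5,
# val2in(1.2, 0.5, 0.0) gives bin index 2, and in2val(2, 0.5, 0.0) maps the
# index back to the bin edge 1.0.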
# equatorial <-> cartesian
def d2r(deg):
return deg*np.pi/180.
def r2d(rad):
return rad*180./np.pi
def eq2car(ra, dec):
"""Convert equatorial coordinates to cartesian
Assumes ra, dec in radians"""
x=np.cos(dec)*np.cos(ra)
y=np.cos(dec)*np.sin(ra)
z=np.sin(dec)
return(x,y,z)
def car2eq(x,y,z):
"""Convert cartesian coordinates to equatorial
Returns ra, dec in radians"""
N=np.size(z)
ra=np.zeros(N)
for i in range(N):
if(x[i]!=0):
ra[i]=np.arctan2(y[i], x[i])
else:
ra[i]=np.pi/2.
dec=np.arcsin(z)
return(ra,dec)
# interpolation
def between_lines(x, y, x1, y1, x2, y2):
"""check if points x,y are between lines defined with x1,y1 and x2,y2"""
if y1[0]>y1[-1]:
y1 = y1[::-1]
x1 = x1[::-1]
if y2[0]>y2[-1]:
y2 = y2[::-1]
x2 = x2[::-1]
xin1 = np.interp(y,y1,x1)
xin2 = np.interp(y,y2,x2)
indin = (x>=xin1) & (x<=xin2)
return indin
# Math
def points2line(p1, p2):
"""Returns coefficients of a line passing through points p1 and p2
Parameters:
p1 - tuple (x1, y1)
p2 - tuple (x2, y2)"""
a=(p2[1]-p1[1])/float(p2[0]-p1[0])
b=p1[1]-p1[0]*a
return [a,b]
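# Example (added for illustration): points2line((0, 0), (2, 4)) returns
# [2.0, 0.0], i.e. the line y = 2*x.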
def crossprodmat(a):
"""Returns a cross product matrix [a]_x
Assumes 3D"""
A=np.matrix([0, -a[2], a[1], a[2], 0, -a[0], -a[1], a[0], 0])
A.shape = (3,3)
return A
def rotmatrix(theta, i):
"""Returns 3x3 rotation matrix around axis i for angle theta (in deg)"""
theta = np.radians(theta)
cth = np.cos(theta)
sth = np.sin(theta)
sign = (-1)**i
R2 = np.array([[cth, -sign*sth], [sign*sth, cth]])
R = np.zeros((3,3))
R[i][i] = 1
if i==0:
R[1:,1:] = R2
elif i==1:
R[0][0] = R2[0][0]
R[0][2] = R2[0][1]
R[2][0] = R2[1][0]
R[2][2] = R2[1][1]
elif i==2:
R[:2,:2] = R2
return R
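# Example (added for illustration): rotmatrix(90, 2) is a rotation about the
# z axis; np.dot(rotmatrix(90, 2), [1, 0, 0]) gives (numerically) [0, 1, 0].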
def sph2cart(ra, dec):
"""Convert two angles on a unit sphere to a 3d vector"""
x = np.cos(ra) * np.cos(dec)
y = np.sin(ra) * np.cos(dec)
z = np.sin(dec)
return (x, y, z)
def cart2sph(x, y, z):
"""Convert a 3d vector on a unit sphere to two angles"""
ra = np.arctan2(y, x)
dec = np.arcsin(z)
ra[ra<0] += 2*np.pi
return (ra, dec)
def rotate_angles(a, d, R):
"""Return angles a, d rotated by a 3x3 matrix R
All angles are in degrees"""
x_, y_, z_ = sph2cart(np.radians(a), np.radians(d))
X = np.column_stack((x_, y_, z_))
X_rot = np.zeros(np.shape(X))
for i in range(np.size(x_)):
X_rot[i] = np.dot(R, X[i])
a_rot, d_rot = cart2sph(X_rot[:, 0], X_rot[:, 1], X_rot[:, 2])
return (np.degrees(a_rot), np.degrees(d_rot))
# Numerical recipes
def gauleg(x1, x2, x, w, n):
eps=3.0e-11
m=int((n+1)/2.)
xm=0.5*(x2+x1)
xl=0.5*(x2-x1)
for i in range(1,m+1):
z=cos(np.pi*(i-0.25)/(n+0.5))
condition = True
while condition:
p1=1.0
p2=0.0
for j in range(1,n+1):
p3=p2
p2=p1
p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j
pp=n*(z*p1-p2)/(z*z-1.0)
z1=z
z=z1-p1/pp
condition = abs(z-z1)>eps
x[i-1]=xm-xl*z
x[n-i]=xm+xl*z
w[i-1]=2.0*xl/((1.0-z*z)*pp*pp)
w[n-i]=w[i-1]
def callgauleg():
n=20
x=np.zeros(n)
w=np.zeros(n)
a=0
b=np.pi/2.
gauleg(a, b, x, w, n)
fx=np.sin(x)
print(sum(fx*w))
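# callgauleg() integrates sin(x) over [0, pi/2] with 20-point Gauss-Legendre
# quadrature; the printed value should agree with the exact integral, 1, to
# near machine precision.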
|
|
"""Support for Dyson Pure Cool Link Sensors."""
import logging
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_cool_link import DysonPureCoolLink
from homeassistant.const import PERCENTAGE, STATE_OFF, TEMP_CELSIUS, TIME_HOURS
from homeassistant.helpers.entity import Entity
from . import DYSON_DEVICES
SENSOR_UNITS = {
"air_quality": None,
"dust": None,
"filter_life": TIME_HOURS,
"humidity": PERCENTAGE,
}
SENSOR_ICONS = {
"air_quality": "mdi:fan",
"dust": "mdi:cloud",
"filter_life": "mdi:filter-outline",
"humidity": "mdi:water-percent",
"temperature": "mdi:thermometer",
}
DYSON_SENSOR_DEVICES = "dyson_sensor_devices"
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson Sensors."""
if discovery_info is None:
return
hass.data.setdefault(DYSON_SENSOR_DEVICES, [])
unit = hass.config.units.temperature_unit
devices = hass.data[DYSON_SENSOR_DEVICES]
# Get Dyson Devices from parent component
device_ids = [device.unique_id for device in hass.data[DYSON_SENSOR_DEVICES]]
new_entities = []
for device in hass.data[DYSON_DEVICES]:
if isinstance(device, DysonPureCool):
if f"{device.serial}-temperature" not in device_ids:
new_entities.append(DysonTemperatureSensor(device, unit))
if f"{device.serial}-humidity" not in device_ids:
new_entities.append(DysonHumiditySensor(device))
elif isinstance(device, DysonPureCoolLink):
new_entities.append(DysonFilterLifeSensor(device))
new_entities.append(DysonDustSensor(device))
new_entities.append(DysonHumiditySensor(device))
new_entities.append(DysonTemperatureSensor(device, unit))
new_entities.append(DysonAirQualitySensor(device))
if not new_entities:
return
devices.extend(new_entities)
add_entities(devices)
class DysonSensor(Entity):
"""Representation of a generic Dyson sensor."""
def __init__(self, device, sensor_type):
"""Create a new generic Dyson sensor."""
self._device = device
self._old_value = None
self._name = None
self._sensor_type = sensor_type
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.async_add_executor_job(
self._device.add_message_listener, self.on_message
)
def on_message(self, message):
"""Handle new messages which are received from the fan."""
# Prevent refreshing if not needed
if self._old_value is None or self._old_value != self.state:
_LOGGER.debug("Message received for %s device: %s", self.name, message)
self._old_value = self.state
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the Dyson sensor name."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return SENSOR_UNITS[self._sensor_type]
@property
def icon(self):
"""Return the icon for this sensor."""
return SENSOR_ICONS[self._sensor_type]
@property
def unique_id(self):
"""Return the sensor's unique id."""
return f"{self._device.serial}-{self._sensor_type}"
class DysonFilterLifeSensor(DysonSensor):
"""Representation of Dyson Filter Life sensor (in hours)."""
def __init__(self, device):
"""Create a new Dyson Filter Life sensor."""
super().__init__(device, "filter_life")
self._name = f"{self._device.name} Filter Life"
@property
def state(self):
"""Return filter life in hours."""
if self._device.state:
return int(self._device.state.filter_life)
return None
class DysonDustSensor(DysonSensor):
"""Representation of Dyson Dust sensor (lower is better)."""
def __init__(self, device):
"""Create a new Dyson Dust sensor."""
super().__init__(device, "dust")
self._name = f"{self._device.name} Dust"
@property
def state(self):
"""Return Dust value."""
if self._device.environmental_state:
return self._device.environmental_state.dust
return None
class DysonHumiditySensor(DysonSensor):
"""Representation of Dyson Humidity sensor."""
def __init__(self, device):
"""Create a new Dyson Humidity sensor."""
super().__init__(device, "humidity")
self._name = f"{self._device.name} Humidity"
@property
def state(self):
"""Return Humidity value."""
if self._device.environmental_state:
if self._device.environmental_state.humidity == 0:
return STATE_OFF
return self._device.environmental_state.humidity
return None
class DysonTemperatureSensor(DysonSensor):
"""Representation of Dyson Temperature sensor."""
def __init__(self, device, unit):
"""Create a new Dyson Temperature sensor."""
super().__init__(device, "temperature")
self._name = f"{self._device.name} Temperature"
self._unit = unit
@property
def state(self):
"""Return Temperature value."""
if self._device.environmental_state:
temperature_kelvin = self._device.environmental_state.temperature
if temperature_kelvin == 0:
return STATE_OFF
if self._unit == TEMP_CELSIUS:
return float(f"{(temperature_kelvin - 273.15):.1f}")
return float(f"{(temperature_kelvin * 9 / 5 - 459.67):.1f}")
return None
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
class DysonAirQualitySensor(DysonSensor):
"""Representation of Dyson Air Quality sensor (lower is better)."""
def __init__(self, device):
"""Create a new Dyson Air Quality sensor."""
super().__init__(device, "air_quality")
self._name = f"{self._device.name} AQI"
@property
def state(self):
"""Return Air Quality value."""
if self._device.environmental_state:
return int(self._device.environmental_state.volatil_organic_compounds)
return None
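
# Note (added): entities are created and registered through setup_platform()
# above via add_entities(); discovery is driven by the parent dyson component
# (DYSON_DEVICES), so this platform module has no standalone entry point.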
|
|
from . import _pyrest_core
class Identifier(_pyrest_core.BaseObject):
""""""
class Sequence(_pyrest_core.BaseObject):
""""""
class Feature(_pyrest_core.BaseObject):
""""""
seq_region_name = property(_pyrest_core.fget("_seq_region_name"), None, None, """Name of the chromosome, scaffold, etc the feature is on""")
feature_type = property(_pyrest_core.fget("_feature_type"), None, None, """Type of this feature, usually redundant with the class itself (e.g. ExonFeature, TranscriptFeature, etc)""")
start = property(_pyrest_core.fget("_start"), None, None, """Start coordinate""")
end = property(_pyrest_core.fget("_end"), None, None, """End coordinate""")
strand = property(_pyrest_core.fget("_strand"), None, None, """Strand""")
assembly_name = property(_pyrest_core.fget("_assembly_name"), None, None, """Name of the genome assembly""")
class FeatureWithID(Feature):
""""""
id = property(_pyrest_core.fget("_id"), None, None, """No documentation""")
class ExonFeature(FeatureWithID):
""""""
source = property(_pyrest_core.fget("_source"), None, None, """No documentation""")
constitutive = property(_pyrest_core.fget("_constitutive"), None, None, """No documentation""")
ensembl_phase = property(_pyrest_core.fget("_ensembl_phase"), None, None, """No documentation""")
ensembl_end_phase = property(_pyrest_core.fget("_ensembl_end_phase"), None, None, """No documentation""")
parent = property(_pyrest_core.fget("_parent"), None, None, """No documentation""")
version = property(_pyrest_core.fget("_version"), None, None, """No documentation""")
rank = property(_pyrest_core.fget("_rank"), None, None, """No documentation""")
class TranslationFeature(FeatureWithID):
""""""
description = property(_pyrest_core.fget("_description"), None, None, """No documentation""")
parent = property(_pyrest_core.fget("_parent"), None, None, """No documentation""")
interpro = property(_pyrest_core.fget("_interpro"), None, None, """No documentation""")
type = property(_pyrest_core.fget("_type"), None, None, """No documentation""")
class FeatureLikeBiotype(FeatureWithID):
""""""
biotype = property(_pyrest_core.fget("_biotype"), None, None, """No documentation""")
external_name = property(_pyrest_core.fget("_external_name"), None, None, """No documentation""")
description = property(_pyrest_core.fget("_description"), None, None, """No documentation""")
source = property(_pyrest_core.fget("_source"), None, None, """No documentation""")
version = property(_pyrest_core.fget("_version"), None, None, """No documentation""")
logic_name = property(_pyrest_core.fget("_logic_name"), None, None, """No documentation""")
class TranscriptFeature(FeatureLikeBiotype):
""""""
translation = property(_pyrest_core.fget("_translation"), None, None, """No documentation""")
exon = property(_pyrest_core.fget("_exon"), None, None, """No documentation""")
parent = property(_pyrest_core.fget("_parent"), None, None, """No documentation""")
TranscriptFeature._construction_rules = {"exon":ExonFeature, "translation":TranslationFeature}
class GeneFeature(FeatureLikeBiotype):
""""""
transcript = property(_pyrest_core.fget("_transcript"), None, None, """No documentation""")
GeneFeature._construction_rules = {"transcript":TranscriptFeature}
class ChipSeqFeature(Feature):
""""""
chipseq_feature_type = property(_pyrest_core.fget("_chipseq_feature_type"), None, None, """ChipSeq type""")
cell_type = property(_pyrest_core.fget("_cell_type"), None, None, """Cell type""")
class MotifFeature(Feature):
""""""
binding_matrix = property(_pyrest_core.fget("_binding_matrix"), None, None, """No documentation""")
score = property(_pyrest_core.fget("_score"), None, None, """No documentation""")
motif_feature_type = property(_pyrest_core.fget("_motif_feature_type"), None, None, """No documentation""")
class RegulatoryFeature(FeatureWithID):
""""""
description = property(_pyrest_core.fget("_description"), None, None, """No documentation""")
bound_start = property(_pyrest_core.fget("_bound_start"), None, None, """No documentation""")
cell_type = property(_pyrest_core.fget("_cell_type"), None, None, """No documentation""")
bound_end = property(_pyrest_core.fget("_bound_end"), None, None, """No documentation""")
activity_evidence = property(_pyrest_core.fget("_activity_evidence"), None, None, """No documentation""")
class ConstrainedElementFeature(FeatureWithID):
""""""
score = property(_pyrest_core.fget("_score"), None, None, """No documentation""")
class VariationFeature(FeatureWithID):
""""""
cell_type = property(_pyrest_core.fget("_cell_type"), None, None, """No documentation""")
alt_alleles = property(_pyrest_core.fget("_alt_alleles"), None, None, """No documentation""")
consequence_type = property(_pyrest_core.fget("_consequence_type"), None, None, """No documentation""")
class StructuralVariationFeature(FeatureWithID):
""""""
class MiscFeature(FeatureWithID):
""""""
misc_set_code = property(_pyrest_core.fget("_misc_set_code"), None, None, """No documentation""")
clone_name = property(_pyrest_core.fget("_clone_name"), None, None, """No documentation""")
misc_set_name = property(_pyrest_core.fget("_misc_set_name"), None, None, """No documentation""")
type = property(_pyrest_core.fget("_type"), None, None, """No documentation""")
name = property(_pyrest_core.fget("_name"), None, None, """No documentation""")
state = property(_pyrest_core.fget("_state"), None, None, """No documentation""")
class SimpleFeature(Feature):
""""""
score = property(_pyrest_core.fget("_score"), None, None, """No documentation""")
external_name = property(_pyrest_core.fget("_external_name"), None, None, """No documentation""")
logic_name = property(_pyrest_core.fget("_logic_name"), None, None, """No documentation""")
class RepeatFeature(Feature):
""""""
description = property(_pyrest_core.fget("_description"), None, None, """No documentation""")
class CDSFeature(FeatureWithID):
""""""
source = property(_pyrest_core.fget("_source"), None, None, """No documentation""")
parent = property(_pyrest_core.fget("_parent"), None, None, """No documentation""")
phase = property(_pyrest_core.fget("_phase"), None, None, """No documentation""")
class Location(_pyrest_core.BaseObject):
""""""
class CoordMapping(_pyrest_core.BaseObject):
""""""
mapped = property(_pyrest_core.fget("_mapped"), None, None, """No documentation""")
original = property(_pyrest_core.fget("_original"), None, None, """No documentation""")
CoordMapping._construction_rules = {"mapped":Location, "original":Location}
__feature_types = {
'gene' : GeneFeature,
'transcript' : TranscriptFeature,
'cds': CDSFeature,
'exon' : ExonFeature,
'repeat' : RepeatFeature,
'simple' : SimpleFeature,
'misc' : MiscFeature,
'variation' : VariationFeature,
'somatic_variation' : VariationFeature,
'structural_variation' : StructuralVariationFeature,
'somatic_structural_variation' : StructuralVariationFeature,
'constrained' : ConstrainedElementFeature,
'regulatory' : RegulatoryFeature,
'motif' : MotifFeature,
'chipseq' : ChipSeqFeature,
'translation' : TranslationFeature,
}
def feature_wrapper(d, r):
"""
    Wrapper around the various types of features.
It automatically selects the appropriate type for the fetched features.
"""
t = d.get('object_type')
if t is None:
t = d.get('feature_type')
if t is None:
print("Unable to find the type of", d)
t = Feature
else:
t = t.lower()
if t not in __feature_types:
print("Unrecognized feature type:", t)
t = Feature
else:
t = __feature_types[t]
    return t(d, r)
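
# Illustrative note (added): feature_wrapper dispatches on the decoded
# payload's 'object_type' key, falling back to 'feature_type'. For a
# hypothetical payload (BaseObject's constructor is defined in _pyrest_core):
#
#   gene = feature_wrapper({'object_type': 'gene', 'start': 1, 'end': 9}, None)
#   assert isinstance(gene, GeneFeature)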
|
|
""" Meta-analysis tools """
import logging
import numpy as np
from scipy.stats import norm
from neurosynth.base import imageutils
from neurosynth.analysis import stats
from os.path import join, exists
from os import makedirs
logger = logging.getLogger('neurosynth.meta')
def analyze_features(dataset, features=None, image_type='association-test_z',
threshold=0.001, q=0.01, output_dir=None, prefix=None):
""" Generate meta-analysis images for a set of features.
Args:
dataset: A Dataset instance containing feature and activation data.
features: A list of named features to generate meta-analysis maps for.
If None, analyzes all features in the current dataset.
image_type: The type of image to return. Specify one of the extensions
generated by the MetaAnalysis procedure--e.g., association-test_z,
uniformity-test_z, etc. By default, will use
association-test_z (i.e., z-scores reflecting the association
between presence of activation and presence of feature).
threshold: The threshold for determining whether or not a Mappable has
a feature. By default, this is 0.001, which is only sensible in the
case of term-based features (so be sure to specify it for other
kinds).
        q: The FDR rate to use for multiple comparisons correction (default =
            0.01, matching the function signature).
output_dir: Directory to save all meta-analysis images to. If none,
returns all the data as a matrix.
prefix: All output images will be prepended with this string (if None,
defaults to the name of the feature).
Returns:
If output_dir is None, an n_voxels x n_features 2D numpy array.
"""
if features is None:
features = dataset.get_feature_names()
if output_dir is None:
result = np.zeros((dataset.masker.n_vox_in_mask, len(features)))
for i, f in enumerate(features):
ids = dataset.get_studies(features=f, frequency_threshold=threshold)
ma = MetaAnalysis(dataset, ids, q=q)
if output_dir is None:
result[:, i] = ma.images[image_type]
else:
pfx = f if prefix is None else prefix + '_' + f
ma.save_results(output_dir=output_dir, prefix=pfx)
if output_dir is None:
return result
class MetaAnalysis(object):
""" Meta-analysis of a Dataset. Currently contrasts two subsets of
studies within a Dataset and saves a bunch of statistical images.
    Only one list of study IDs (ids) needs to be passed; the universe of
    studies will be bisected into those that are and are not included in the
    list, and the contrast is then performed across these two groups.
    If an optional second study list is provided (ids2), the Dataset
    is first constrained to the union of ids and ids2, and the standard
    contrast is then performed."""
def __init__(self, dataset, ids, ids2=None, q=0.01, prior=0.5,
min_studies=1):
""" Initialize a new MetaAnalysis instance and run an analysis.
Args:
dataset: A Dataset instance.
ids: A list of Mappable IDs to include in the meta-analysis.
ids2: Optional second list of Mappable IDs. If passed, the set of
studies will be restricted to the union of ids and ids2 before
performing the meta-analysis. This is useful for meta-analytic
contrasts, as the resulting images will in effect identify
regions that are reported/activated more frequently in one
list than in the other.
q: The FDR threshold to use when correcting for multiple
comparisons. Set to .01 by default.
prior: The prior to use when calculating conditional probabilities.
This is the prior probability of a feature being used in a
study (i.e., p(F)). For example, if set to 0.25, the analysis
will assume that 1/4 of studies load on the target feature, as
opposed to the empirically estimated p(F), which is len(ids) /
total number of studies in the dataset. If prior is not passed,
defaults to 0.5, reflecting an effort to put all terms on level
footing and avoid undue influence of base rates (because some
terms are much more common than others). Note that modifying
the prior will only affect the effect size/probability maps,
and not the statistical inference (z-score) maps.
min_studies: Integer or float indicating which voxels to mask out
from results due to lack of stability. If an integer is passed,
all voxels that activate in fewer than this number of studies
will be ignored (i.e., a value of 0 will be assigned in all
output images). If a float in the range of 0 - 1 is passed,
this will be interpreted as a proportion to use as the cut-off
(e.g., passing 0.03 will exclude all voxels active in fewer
                than 3% of the entire dataset). Defaults to 1, meaning all
                voxels that activate in at least one study will be kept.
"""
self.dataset = dataset
mt = dataset.image_table
self.selected_ids = list(set(mt.ids) & set(ids))
self.selected_id_indices = np.in1d(mt.ids, ids)
# If ids2 is provided, we only use mappables explicitly in either ids or ids2.
# Otherwise, all mappables not in the ids list are used as the control
# condition.
unselected_id_indices = ~self.selected_id_indices if ids2 is None \
else np.in1d(mt.ids, ids2)
mappable_id_indices = self.selected_id_indices | unselected_id_indices
# Calculate different count variables
logger.debug("Calculating counts...")
n_selected = len(self.selected_ids)
n_unselected = np.sum(unselected_id_indices)
n_mappables = n_selected + n_unselected
n_selected_active_voxels = mt.data.dot(self.selected_id_indices)
n_unselected_active_voxels = mt.data.dot(unselected_id_indices)
# Nomenclature for variables below: p = probability, F = feature present, g = given,
# U = unselected, A = activation. So, e.g., pAgF = p(A|F) = probability of activation
# in a voxel if we know that the feature is present in a study.
pF = (n_selected * 1.0) / n_mappables
mappable_data = mt.data if ids2 is None else mt.data[:, mappable_id_indices]
pA = np.array((mappable_data.sum(axis=1) * 1.0) / n_mappables).squeeze()
# Conditional probabilities
logger.debug("Calculating conditional probabilities...")
pAgF = n_selected_active_voxels * 1.0 / n_selected
pAgU = n_unselected_active_voxels * 1.0 / n_unselected
pFgA = pAgF * pF / pA
# Recompute conditionals with uniform prior
logger.debug("Recomputing with uniform priors...")
pA_prior = prior * pAgF + (1 - prior) * pAgU
pFgA_prior = pAgF * prior / pA_prior
def p_to_z(p, sign):
            p = p / 2  # convert the two-tailed p-value to one-tailed
# prevent underflow
p[p < 1e-240] = 1e-240
# Convert to z and assign tail
z = np.abs(norm.ppf(p)) * sign
# Set very large z's to max precision
z[np.isinf(z)] = norm.ppf(1e-240) * -1
return z
# One-way chi-square test for consistency of activation
p_vals = stats.one_way(
np.squeeze(n_selected_active_voxels), n_selected)
p_vals[p_vals < 1e-240] = 1e-240
z_sign = np.sign(
n_selected_active_voxels - np.mean(
n_selected_active_voxels)).ravel()
pAgF_z = p_to_z(p_vals, z_sign)
fdr_thresh = stats.fdr(p_vals, q)
pAgF_z_FDR = imageutils.threshold_img(
pAgF_z, fdr_thresh, p_vals, mask_out='above')
# Two-way chi-square for specificity of activation
cells = np.squeeze(
np.array([[n_selected_active_voxels, n_unselected_active_voxels],
[n_selected - n_selected_active_voxels, n_unselected -
n_unselected_active_voxels]]).T)
p_vals = stats.two_way(cells)
p_vals[p_vals < 1e-240] = 1e-240
z_sign = np.sign(pAgF - pAgU).ravel()
pFgA_z = p_to_z(p_vals, z_sign)
fdr_thresh = stats.fdr(p_vals, q)
pFgA_z_FDR = imageutils.threshold_img(
pFgA_z, fdr_thresh, p_vals, mask_out='above')
# Retain any images we may want to save or access later
self.images = {
'pA': pA,
'pAgF': pAgF,
'pFgA': pFgA,
('pA_given_pF=%0.2f' % prior): pA_prior,
('pFgA_given_pF=%0.2f' % prior): pFgA_prior,
'uniformity-test_z': pAgF_z,
'association-test_z': pFgA_z,
('uniformity-test_z_FDR_%s' % q): pAgF_z_FDR,
('association-test_z_FDR_%s' % q): pFgA_z_FDR
}
# Mask out all voxels below num_studies threshold
if min_studies > 0:
if isinstance(min_studies, int):
min_studies = float(
min_studies) / n_mappables # Recalculate as proportion
vox_to_exclude = np.where(pA < min_studies)[0] # Create mask
# Mask each image
for k in self.images:
self.images[k][vox_to_exclude] = 0
def save_results(self, output_dir='.', prefix='', prefix_sep='_',
image_list=None):
""" Write out any images generated by the meta-analysis.
Args:
output_dir (str): folder to write images to
prefix (str): all image files will be prepended with this string
prefix_sep (str): glue between the prefix and rest of filename
image_list (list): optional list of images to save--e.g.,
['pFgA_z', 'pAgF']. If image_list is None (default), will save
all images.
"""
if prefix == '':
prefix_sep = ''
if not exists(output_dir):
makedirs(output_dir)
logger.debug("Saving results...")
if image_list is None:
image_list = self.images.keys()
for suffix, img in self.images.items():
if suffix in image_list:
filename = prefix + prefix_sep + suffix + '.nii.gz'
outpath = join(output_dir, filename)
imageutils.save_img(img, outpath, self.dataset.masker)
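
# Hedged usage sketch (added; the dataset path and feature name below are
# hypothetical, not from this module):
#
#   from neurosynth.base.dataset import Dataset
#   dataset = Dataset.load('dataset.pkl')
#   ids = dataset.get_studies(features='emotion', frequency_threshold=0.001)
#   ma = MetaAnalysis(dataset, ids, q=0.01)
#   ma.save_results(output_dir='emotion_meta', prefix='emotion')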
|
|
# In[1]:
import sys
sys.path.append("/Users/rbiswas/src/interfacecosmology")
import numpy as np
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import camb_utils.cambio as cio
# halomass is imported under both aliases used below (hm and hmf)
import halomass as hm
import halomass as hmf
import massfunctions as mf
import psutils as psu
import utils.plotutils as pu
from interfaces import FCPL
# In[2]:
# INPUT FILES (CAMB outputs)
dirn = "./"
#M000 (LCDM)
tkfile = dirn +"example_data/M000/m000n0_transfer_fin_out.dat"
pkfile = dirn + "example_data/M000/m000n0_matterpower_fin.dat"
#M000n1 (same LCDM, with some fraction of CDM replaced by massive neutrinos)
ntkfile = dirn +"example_data/M000n1/m000n1_transfer_fin_out.dat"
npkfile = dirn +"example_data/M000n1/m000n1_matterpower_fin.dat"
# In[3]:
#Cosmological Models:
# Same cosmology represented differently
M000 = FCPL(H0=71., Om0=0.26479, Ob0=0.044793, ns=0.963, As=2.16e-9)
M000s = FCPL(H0=71., Om0=0.26479, Ob0=0.044793, ns=0.963, sigma8=0.8)
#
M000n1 = FCPL(H0=71., Om0=0.26479, Ob0=0.044793, ns=0.963, sigma8=0.8, sigmamnu=0.94)
# In[4]:
#powerspectrum
#use power spectrum from CAMB power spectrum output
psfrompk = psu.powerspectrum(koverh = None, asciifile = pkfile)
#use matter power spectrum from CAMB transfer function output, with As
psfromtkm = psu.powerspectrum(koverh = None, pstype ="matter", asciifile =tkfile, cosmo = M000)
#use matter power spectrum from CAMB transfer function output, with sigma8
psfromtkms = psu.powerspectrum(koverh = None, pstype ="matter", sigma8type = "matter", asciifile =tkfile, cosmo = M000s)
#use cb power spectrum from CAMB transfer function output, normalized to matter
psfromtkcbs = psu.powerspectrum(koverh = None, pstype ="cb", sigma8type = "matter", asciifile =tkfile, cosmo = M000s)
#use cb power spectrum from CAMB transfer function output, normalized to cb
psfromtkcbscb = psu.powerspectrum(koverh = None, pstype ="cb", sigma8type = "cb", asciifile =tkfile, cosmo = M000s)
#
#psfrompkn = psu.powerspectrum(koverh = None, asciifile = npkfile)
#psfromtknm = psu.powerspectrum(koverh = None, asciifile = ntkfile, pstype ="matter", cosmo = M000n1)
# In[5]:
simdir = "/Users/rbiswas/doc/projects/NeutrinoMF/doc/files/paperfiles/"
fname0 = simdir + "m000.499.fof_mf"
# In[13]:
#M000_z0 = np.loadtxt(simdir + "m000.499.fof_mf")
# In[15]:
#MinhinvMsun = M000_z0[:,0]
# In[25]:
def fracPoissonerrors(num, asymmetric=True):
    """Fractional Poisson errors on counts: (sqrt(N + 1/4) +/- 1/2) / N if
    asymmetric, else the symmetric 1/sqrt(N)."""
    num = np.asarray(num)
    sig = np.sqrt(num + 1.0 / 4.)
    if asymmetric:
        return np.array([(sig + 0.5) / num, (sig - 0.5) / num])
    else:
        return 1. / np.sqrt(num)
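# Worked example (added): for num = 100 counts, sqrt(100.25) ~ 10.0125, so the
# asymmetric fractional errors are (10.0125 + 0.5)/100 ~ 0.105 (upper) and
# (10.0125 - 0.5)/100 ~ 0.095 (lower), bracketing the symmetric 1/sqrt(100) = 0.1.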
def get_errorbars(fname):
    """Read a FoF mass-function file: masses, dn/dlnM, cluster counts, and the
    two precomputed theory curves (Bhattacharya, MICE) in the last columns."""
    data = np.loadtxt(fname)
    MininvhMsun = data[:, 0]
    numcluster = data[:, 1]
    massfn = data[:, 2]
    calcB = data[:, -2]
    calcM = data[:, -1]
    return MininvhMsun, massfn, fracPoissonerrors(numcluster) * massfn, calcB, calcM
#MininvhMsun , massfn, massfnyerr = get_errorbars(fname)
#ax = plt.subplot()
def plotnormalized(fname, normals=None, normalnames=None, styles=None, usefile=True):  # , ps=None, cosmo=None
ax = plt.gca()
#if axesobj != None:
# ax = axesobj
MininvhMsun , massfn , massfnyerr,calcB, calcM = get_errorbars(fname)
#print "YERR", massfnyerr
#print np.shape(massfnyerr[0]), np.shape(massfnyerr[1]), np.shape( MininvhMsun), np.shape(massfn), np.shape(normals[0])
#Mass = MininvhMsun*cosmo.h
#normalB = hm.dndlnM(Mass , ps = ps , z= z, cosmo
#print yerrba
    if normals is None:
normval = 1.
#ax.errorbar(MininvhMsun , massfn /normval , yerr = yerrbars/normval)
if usefile:
ax.errorbar( MininvhMsun, massfn/calcB , yerr = massfnyerr/calcB, fmt = "ks", label="Bhattacharya etal.")
ax.errorbar( MininvhMsun, massfn/calcM , yerr = massfnyerr/calcM, fmt = "ro", label="MICE")
else:
for i, normval in enumerate(normals):
name = normalnames [i]
stylename = styles[i]
#print name, np.shape(normval), np.shape(massfn), np.shape(MininvhMsun), np.shape(normval), name
ax.errorbar( MininvhMsun, massfn/normval , yerr = massfnyerr/normval, label = name, fmt = stylename)
#ax.errorbar( MininvhMsun, massfn/calcB , yerr = massfnyerr/calcB, fmt = "bd")
#ax.errorbar( MininvhMsun, massfn/hm.dndlnM(M = masses, ps = psfromtkcbscb , z = 0.,bgtype="cb",cosmo= M000s), yerr = massfnyerr/normval, label = name, fmt = stylename)
ax.set_xscale('log')
ax.grid(True)
xvals = np.linspace(1.0e13,1.1e15,2)
refval = 1.0
bandvals = [-0.1,0.1]
ax.plot(xvals, np.ones(len(xvals)),'k-',lw =2.0)
ax.fill_between (xvals , refval + bandvals[0], refval + bandvals[1],color = 'gray', alpha =0.25)
ax.set_xlim(1.0e13,1.0e15)
ax.xaxis.set_ticklabels("",visible=False)
ax.yaxis.set_ticks([0.9,1.0,1.1])
#ax.yaxis.set_ticklabels("",visible=False)
ax.set_ylim(0.85,1.15)
ax.grid(True)
return 0
# In[33]:
gs = gridspec.GridSpec(3, 1, height_ratios=[0.33, 0.33, 0.33])  # one column; width_ratios must match ncols, so it is dropped
ax0 = plt.subplot(gs[0])
fname = simdir+ "m000.499.fof_mf"
masses = get_errorbars(fname)[0]/M000s.h
#sigmaM_0 = psu.
dndlnm_B0 = hmf.dndlnM0(M = masses, ps = psfromtkcbscb , z = 0.,bgtype="cb",cosmo= M000s)/M000s.h**3
dndlnm_M0 = hmf.dndlnM0(M = masses, ps = psfromtkcbscb , z = 0.,bgtype="cb",cosmo = M000s, fittingform="MICE")/M000s.h**3 #*M000s.h**3
plotnormalized(fname, normals = [dndlnm_B0,dndlnm_M0],normalnames = ["Bhattacharya", "MICE"],styles =["ks","ro"])
#ax0.set_xscale('log')
#ax0.set_yscale('log')
ax1 = plt.subplot(gs[1])
psfromtkcbscb1 = psu.powerspectrum(koverh = None, pstype ="cb", sigma8type = "cb", asciifile =tkfile, cosmo = M000s,z = 1.0, method = "CAMBoutgrowth")
fname1 = simdir+ "m000.247.fof_mf"
masses1 = get_errorbars(fname1)[0]/M000s.h
dndlnm_B1 = hmf.dndlnM0(M = masses1, ps = psfromtkcbscb , z = 1.,bgtype="cb",cosmo= M000s,deltac= 1.684)/M000s.h**3/2.**3.
dndlnm_M1 = hmf.dndlnM0(M = masses1, ps = psfromtkcbscb , z = 1.,bgtype="cb",cosmo = M000s, fittingform="MICE")/M000s.h**3./2**3.0 #*M000s.h**3
#dndlnm_B1 = hmf.dndlnM(M = masses1, ps = psfromtkcbscb1 , z = 1.,bgtype="cb",cosmo= M000s,deltac= 1.684)/M000s.h**3
#dndlnm_M1 = hmf.dndlnM(M = masses1, ps = psfromtkcbscb1 , z = 1.,bgtype="cb",cosmo = M000s, fittingform="MICE")/M000s.h**3. #*M000s.h**3
plotnormalized(fname1, normals = [dndlnm_B1,dndlnm_M1],normalnames = ["Bhattacharya", "MICE"],styles =["ks","ro"])
#plotnormalized(simdir+ "m000.247.fof_mf")
ax2 = plt.subplot(gs[2])
psfromtkcbscb2 = psu.powerspectrum(koverh = None, pstype ="cb", sigma8type = "cb", asciifile =tkfile, cosmo = M000s,z = 2.0, method = "CAMBoutgrowth")
fname2 = simdir+ "m000.163.fof_mf"
masses2 = get_errorbars(fname2)[0]/M000s.h
#print "Masses", masses2
#print "Eavltest", np.shape(masses2)
dndlnm_B2 = hmf.dndlnM0(M = masses2, ps = psfromtkcbscb , z = 2.,bgtype="cb",cosmo= M000s, deltac=1.686)/M000s.h**3/3**3.0
dndlnm_M2 = hmf.dndlnM0(M = masses2, ps = psfromtkcbscb , z = 2.,bgtype="cb",cosmo = M000s, fittingform="MICE")/M000s.h**3/3.0**3 #*M000s.h**3
#dndlnm_B2 = hmf.dndlnM(M = masses2, ps = psfromtkcbscb2 , z = 2.,bgtype="cb",cosmo= M000s, deltac=1.686)/M000s.h**3/27.0
#dndlnm_M2 = hmf.dndlnM(M = masses2, ps = psfromtkcbscb2 , z = 2.,bgtype="cb",cosmo = M000s, fittingform="MICE")/M000s.h**3/27. #*M000s.h**3
#print "Hello what is the problem?"
#print np.shape(masses2), np.shape(psfromtkcbscb2), np.shape(dndlnm_B2),np.shape(dndlnm_M2)
#plotnormalized(fname2, normals = [dndlnm_B1,dndlnm_M1],normalnames = ["Bhattacharya", "MICE"],styles =["ks","ro"])
#print "Bye"
#print "Eavl"#, np.shape(masses2) #,np.shape(psfromtkcbscb2), np.shape(dndlnm_B2) , np.shape(dndlnm_M2)
#plotnormalized(fname2, normals = [dndlnm_B2,dndlnm_M2],normalnames = ["Bhattacharya", "MICE"],styles =["ks","ro"])
plotnormalized(fname2, normals = [dndlnm_B2,dndlnm_M2],normalnames = ["Bhattacharya", "MICE"],styles =["ks","ro"])
#plotnormalized(simdir + "m000.163.fof_mf")
ax2.set_xscale('log')
ax2.set_xlabel(r'Mass ($h^{-1}M_\odot$)' )
ax1.set_ylabel("ratio of massfn")
ax2.legend(loc='upper right', numpoints =1)
ax1.text(1.0, 0.0, "z = 1.")
#axt2 = ax2.twiny()
#axt2.set_xlabel(r"z=2")
#axt1 = ax1.twiny()
#axt1.set_xlabel(r"z=1")
#axt0 = ax0.twiny()
#axt0.set_xlabel(r"z=0")
plt.tight_layout(h_pad = -0.1, w_pad = -0.1)
#plt.savefig("M000_Formula.pdf")
plt.savefig("M000_SumanFormula.pdf")
# Out[33]:
# 0.333 0.788 0.807 1.795
# 0.308553824609 0.782556886404 0.807 1.795
# 0.295094350742 0.779390315288 0.807 1.795
#
# image file:
# In[17]:
plt.plot(masses,1.0/hmf.dndlnM0(M = masses, ps = psfromtkcbscb , z = 2.,bgtype="cb",cosmo= M000s, deltac=1.686)*hmf.dndlnM(M = masses, ps = psfromtkcbscb2, z= 2.0, bgtype = "cb", cosmo= M000s, deltac=1.686),"ks")
plt.xscale('log')
# Out[17]:
# 0.295094350742 0.779390315288 0.807 1.795
# 0.295094350742 0.779390315288 0.807 1.795
#
# image file:
# In[ ]:
dndlnm_B1c = dndlnm_B1*0.12
plotnormalized(fname1, normals = [dndlnm_B1c,dndlnm_M1],normalnames = ["Bhattacharya", "MICE"],styles =["ks","ro"])
print dndlnm_B1 , dndlnm_B1c
# In[38]:
dndlnm_B2 = hm.dndlnM(M = masses2, ps = psfromtkcbscb2 , z = 2.,bgtype="cb",cosmo= M000s, deltac=1.686)/M000s.h**3
dndlnm_M2 = hm.dndlnM(M = masses2, ps = psfromtkcbscb2 , z = 2.,bgtype="cb",cosmo = M000s, fittingform="MICE")/M000s.h**3
print dndlnm_B2, dndlnm_M2
print np.shape(dndlnm_B2)
print 1./8.
# Out[38]:
# 0.295094350742 0.779390315288 0.807 1.795
# [ 8.69840997e-03 5.74194925e-03 3.62316898e-03 2.18669780e-03
# 1.27152405e-03 7.13534935e-04 3.82356121e-04 1.92706374e-04
# 9.00314410e-05 3.81860774e-05 1.49713792e-05 5.08301266e-06
# 1.40307142e-06 3.45923170e-07 6.94611686e-08 1.67266650e-08] [ 8.09230756e-03 5.33340159e-03 3.36490446e-03 2.03425732e-03
# 1.18760520e-03 6.71018535e-04 3.63331163e-04 1.85832054e-04
# 8.85716560e-05 3.85895834e-05 1.56552636e-05 5.55795206e-06
# 1.62826867e-06 4.31161409e-07 9.46077053e-08 2.47736866e-08]
# (16,)
# 0.125
#
# In[109]:
plt.plot(masses, hm.dndlnM(M = masses, ps = psfromtkcbscb, cosmo = M000s)/psu.dndlnM(masses, ps = psfromtkcbscb,cosmo = M000))
plt.xscale('log')
plt.ylim(0.9,1.1)
print M000s.h**3
|
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module contains object generation utilities."""
import datetime
import random
import names
from ggrc import db
from ggrc import models
from ggrc.app import app
from ggrc.services import common
from ggrc_basic_permissions import models as permissions_models
from integration.ggrc import api_helper
from integration.ggrc.models import factories
class Generator(object):
"""Generator base class."""
def __init__(self):
self.api = api_helper.Api()
self.resource = common.Resource()
  @staticmethod
  def random_date(start=None, end=None):
    """Generate a random date between start and end."""
    # Evaluate "today" at call time; a datetime.date.today() default argument
    # would be frozen at import time.
    if start is None:
      start = datetime.date.today()
    if not end or start > end:
      end = start + datetime.timedelta(days=7)
    return start + datetime.timedelta(
        seconds=random.randint(0, int((end - start).total_seconds())))
def generate(self, obj_class, obj_name=None, data=None):
"""Generate `obj_class` instance with fields populated from `data`."""
# pylint: disable=protected-access
if obj_name is None:
obj_name = obj_class._inflector.table_plural
if data is None:
data = {}
response = self.api.post(obj_class, data)
response_obj = None
if response.json:
try:
response_obj = obj_class.query.get(response.json[obj_name]['id'])
except TypeError:
raise Exception("Invalid response.\nResponse: {}\nError: {}".format(
response,
response.data
))
return response, response_obj
def modify(self, obj, obj_name, data):
"""Make a PUT request to modify `obj` with new fields in `data`."""
obj_class = obj.__class__
response = self.api.put(obj, data)
response_obj = None
if response.json:
response_obj = obj_class.query.get(response.json[obj_name]['id'])
return response, response_obj
def obj_to_dict(self, obj, model_name=None):
with app.app_context():
return self.resource.object_for_json(obj, model_name)
class ObjectGenerator(Generator):
"""Main object generator class.
  This class is used as a helper for generating ggrc objects via the API. It
  is used for writing integration tests on things that attach to API
  callbacks, such as model_posted, model_put and model_deleted.
"""
  @staticmethod
  def create_stub(obj):
    """Return a minimal JSON stub (id, href, type) for the given object."""
    # pylint: disable=protected-access
return {
"id": obj.id,
"href": "/api/{}/{}".format(obj._inflector.table_name, obj.id),
"type": obj.type,
}
def generate_object(self, obj_class, data=None, add_fields=True):
"""Generate an object of `obj_class` with fields from `data`.
This generator is used for creating objects with data. By default it will
add the first user in the DB as the object owner and it will create a
random title.
    Args:
      obj_class: Model that we want to generate.
      data: Dict containing generation data for the object.
      add_fields: Flag for adding owners and title default field values. If
        these are present in the data, default values will be overridden.
Returns:
Tuple containing server response and the generated object.
"""
# pylint: disable=protected-access
if data is None:
data = {}
obj_name = obj_class._inflector.table_singular
obj = obj_class()
obj_dict = self.obj_to_dict(obj, obj_name)
if add_fields:
obj_dict[obj_name].update({
"owners": [self.create_stub(models.Person.query.first())],
"title": factories.random_str(),
})
obj_dict[obj_name].update(data)
return self.generate(obj_class, obj_name, obj_dict)
def generate_relationship(self, source, destination, context=None, **kwargs):
"""Create relationship between two objects.
Args:
source (db.Model): source model of the relationship.
destination (db.Model): destination model of the relationship.
context (Context): context of the relationship. Usually a context of one
of the related objects.
kwargs (dict): various arguments for the given relationship, such as
relationship attributes.
Returns:
response object and the actual relationship that was created.
"""
if context:
context = self.create_stub(context)
data = {
"source": self.create_stub(source),
"destination": self.create_stub(destination),
"context": context,
}
data.update(kwargs)
return self.generate_object(
models.Relationship, add_fields=False, data=data)
def generate_comment(self, commentable, assignee_type, description,
**kwargs):
"""Create a comment on a commentable object.
This function creates a comment for a given object and generates the
correct relationship to that object. The result of generating the
relationship is discarded and the user will only see if a comment is
created.
Args:
commentable (db.Model): Model that is commentable such as Request or
Assessment.
assignee_type (string): Assignee type of the person creating the comment.
description (string): Comment content.
kwargs (dict): Any additional data added to the comments.
Returns:
Server response and the generated comment.
"""
data = {
"description": description,
"assignee_type": assignee_type,
"context": self.create_stub(commentable),
}
data.update(kwargs)
response, comment_ = self.generate_object(
models.Comment, add_fields=False, data=data)
# Refresh the object after an API call.
commentable = commentable.__class__.query.get(commentable.id)
self.generate_relationship(commentable, comment_, commentable.context)
return response, comment_
def generate_user_role(self, person, role):
"""Generate a mapping between `role` and `person`."""
data = {
"user_role": {
"context": None,
"person": self.create_stub(person),
"role": self.create_stub(role),
}
}
return self.generate(permissions_models.UserRole, "user_role", data)
def generate_person(self, data=None, user_role=None):
"""Generate a person with fields from `data` and with an optional role."""
if data is None:
data = {}
obj_name = 'person'
name = names.get_full_name()
default = {
obj_name: {
"context": None,
"name": name,
"email": "%s@test.com" % name.replace(" ", ".").lower(),
}
}
default[obj_name].update(data)
response, person = self.generate(models.Person, obj_name, default)
if person and user_role:
role = db.session.query(permissions_models.Role).filter(
permissions_models.Role.name == user_role).first()
self.generate_user_role(person, role)
return response, person
def generate_random_objects(self, count=5):
"""Generate `count` objects of random types."""
random_objects = []
classes = [
models.Control,
models.Objective,
models.Standard,
models.System,
models.OrgGroup,
]
for _ in range(count):
obj_class = random.choice(classes)
obj = self.generate_object(obj_class)[1]
random_objects.append(obj)
return random_objects
def generate_random_people(self, count=5, **kwargs):
"""Generate `count` random people."""
random_people = []
for _ in range(count):
_, person = self.generate_person(**kwargs)
if person:
random_people.append(person)
return random_people
def generate_notification_setting(self, user_id, notif_type, enable_flag):
"""Generate notification setting for user `user_id` of `notif_type`."""
obj_name = "notification_config"
data = {
obj_name: {
"person_id": user_id,
"notif_type": notif_type,
"enable_flag": enable_flag,
"context": None,
"type": "NotificationConfig",
}
}
return self.generate(models.NotificationConfig, obj_name, data)
def generate_custom_attribute(self, definition_type, **kwargs):
"""Generate a CA definition of `definition_type`."""
obj_name = "custom_attribute_definition"
data = {
obj_name: {
"title": kwargs.get("title", factories.random_str()),
"custom_attribute_definitions": [],
"custom_attributes": {},
"definition_type": definition_type,
"modal_title": kwargs.get("modal_title", factories.random_str()),
"attribute_type": kwargs.get("attribute_type", "Text"),
"mandatory": kwargs.get("mandatory", False),
"helptext": kwargs.get("helptext", None),
"placeholder": kwargs.get("placeholder", None),
"context": {"id": None},
"multi_choice_options": kwargs.get("options", None),
}
}
data[obj_name].update(kwargs)
return self.generate(models.CustomAttributeDefinition, obj_name, data)
def generate_custom_attribute_value(self, custom_attribute_id, attributable,
**kwargs):
"""Generate a CA value in `attributable` for CA def with certain id."""
obj_name = "custom_attribute_value"
data = {
obj_name: {
"title": kwargs.get("title", factories.random_str()),
"custom_attribute_id": custom_attribute_id,
"attributable_type": attributable.__class__.__name__,
"attributable_id": attributable.id,
"attribute_value": kwargs.get("attribute_value"),
# "attribute_object": not implemented
"context": {"id": None},
},
}
data[obj_name].update(kwargs)
return self.generate(models.CustomAttributeValue, obj_name, data)
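
# Hedged usage sketch (added; assumes a configured ggrc test database and
# session, so it is illustrative rather than ready to run in isolation):
#
#   generator = ObjectGenerator()
#   _, person = generator.generate_person(user_role="Administrator")
#   _, control = generator.generate_object(models.Control)
#   generator.generate_relationship(person, control)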
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from blazar import tests
from blazar.tests import fake_requests
from blazar.utils.openstack import exceptions
from blazar.utils.openstack import placement
from oslo_config import cfg
from oslo_config import fixture as conf_fixture
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
CONF = cfg.CONF
PLACEMENT_MICROVERSION = 1.29
class TestPlacementClient(tests.TestCase):
def setUp(self):
super(TestPlacementClient, self).setUp()
self.cfg = self.useFixture(conf_fixture.Config(CONF))
self.cfg.config(os_auth_host='foofoo')
self.cfg.config(os_auth_port='8080')
self.cfg.config(os_auth_prefix='identity')
self.cfg.config(os_auth_version='v3')
self.cfg.config(os_region_name='region_foo')
self.client = placement.BlazarPlacementClient()
def test_client_auth_url(self):
client = self.client._create_client()
self.assertEqual("http://foofoo:8080/identity/v3",
client.session.auth.auth_url)
def _add_default_kwargs(self, kwargs):
kwargs['endpoint_filter'] = {'service_type': 'placement',
'interface': 'public',
'region_name': 'region_foo'}
kwargs['headers'] = {'accept': 'application/json'}
kwargs['microversion'] = PLACEMENT_MICROVERSION
kwargs['raise_exc'] = False
kwargs['rate_semaphore'] = mock.ANY
def _assert_keystone_called_once(self, kss_req, url, method, **kwargs):
self._add_default_kwargs(kwargs)
kss_req.assert_called_once_with(url, method, **kwargs)
def _assert_keystone_called_any(self, kss_req, url, method, **kwargs):
self._add_default_kwargs(kwargs)
kss_req.assert_any_call(url, method, **kwargs)
@mock.patch('keystoneauth1.session.Session.request')
def test_get(self, kss_req):
kss_req.return_value = fake_requests.FakeResponse(200)
url = '/resource_providers'
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
self._assert_keystone_called_once(kss_req, url, 'GET')
@mock.patch('keystoneauth1.session.Session.request')
def test_post(self, kss_req):
kss_req.return_value = fake_requests.FakeResponse(200)
url = '/resource_providers'
data = {'name': 'unicorn'}
resp = self.client.post(url, data)
self.assertEqual(200, resp.status_code)
self._assert_keystone_called_once(kss_req, url, 'POST', json=data)
@mock.patch('keystoneauth1.session.Session.request')
def test_put(self, kss_req):
kss_req.return_value = fake_requests.FakeResponse(200)
url = '/resource_providers'
data = {'name': 'unicorn'}
resp = self.client.put(url, data)
self.assertEqual(200, resp.status_code)
self._assert_keystone_called_once(kss_req, url, 'PUT', json=data)
@mock.patch('keystoneauth1.session.Session.request')
def test_delete(self, kss_req):
kss_req.return_value = fake_requests.FakeResponse(200)
url = '/resource_providers'
resp = self.client.delete(url)
self.assertEqual(200, resp.status_code)
self._assert_keystone_called_once(kss_req, url, 'DELETE')
@mock.patch('keystoneauth1.session.Session.request')
def test_get_resource_provider(self, kss_req):
rp_name = 'blazar'
rp_uuid = uuidutils.generate_uuid()
parent_uuid = uuidutils.generate_uuid()
mock_json_data = {
'resource_providers': [
{
'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': parent_uuid
}
]
}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_json_data))
result = self.client.get_resource_provider(rp_name)
expected_url = '/resource_providers?name=blazar'
self._assert_keystone_called_once(kss_req, expected_url, 'GET')
expected = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': parent_uuid}
self.assertEqual(expected, result)
@mock.patch('keystoneauth1.session.Session.request')
def test_get_resource_provider_no_rp(self, kss_req):
rp_name = 'blazar'
mock_json_data = {
'resource_providers': []
}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_json_data))
result = self.client.get_resource_provider(rp_name)
expected_url = '/resource_providers?name=blazar'
self._assert_keystone_called_once(kss_req, expected_url, 'GET')
        self.assertIsNone(result)
@mock.patch('keystoneauth1.session.Session.request')
def test_get_resource_provider_fail(self, kss_req):
rp_name = 'blazar'
kss_req.return_value = fake_requests.FakeResponse(404)
self.assertRaises(
exceptions.ResourceProviderRetrievalFailed,
self.client.get_resource_provider, rp_name)
@mock.patch('keystoneauth1.session.Session.request')
def test_create_resource_provider(self, kss_req):
rp_name = 'Blazar'
rp_uuid = uuidutils.generate_uuid()
parent_uuid = uuidutils.generate_uuid()
mock_json_data = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': parent_uuid}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_json_data))
result = self.client.create_resource_provider(
rp_name, rp_uuid=rp_uuid, parent_uuid=parent_uuid)
expected_url = '/resource_providers'
expected_data = {'uuid': rp_uuid,
'name': rp_name,
'parent_provider_uuid': parent_uuid}
self._assert_keystone_called_once(kss_req, expected_url, 'POST',
json=expected_data)
self.assertEqual(mock_json_data, result)
@mock.patch('keystoneauth1.session.Session.request')
def test_create_resource_provider_fail(self, kss_req):
rp_name = 'Blazar'
kss_req.return_value = fake_requests.FakeResponse(404)
self.assertRaises(
exceptions.ResourceProviderCreationFailed,
self.client.create_resource_provider, rp_name)
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_resource_provider(self, kss_req):
rp_uuid = uuidutils.generate_uuid()
kss_req.return_value = fake_requests.FakeResponse(200)
self.client.delete_resource_provider(rp_uuid)
expected_url = '/resource_providers/' + str(rp_uuid)
self._assert_keystone_called_once(kss_req, expected_url, 'DELETE')
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_resource_provider_fail(self, kss_req):
rp_uuid = uuidutils.generate_uuid()
kss_req.return_value = fake_requests.FakeResponse(404)
self.assertRaises(
exceptions.ResourceProviderDeletionFailed,
self.client.delete_resource_provider, rp_uuid)
@mock.patch('keystoneauth1.session.Session.request')
def test_create_reservation_provider(self, kss_req):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
get_json_mock = {
'resource_providers': [
{
'uuid': host_uuid,
'name': host_name,
'generation': 0,
'parent_provider_uuid': None
}
]
}
post_json_mock = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid}
mock_call1 = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(get_json_mock))
mock_call2 = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(post_json_mock))
kss_req.side_effect = [mock_call1, mock_call2]
self.client.create_reservation_provider(host_name)
expected_url_get = "/resource_providers?name=%s" % host_name
self._assert_keystone_called_any(kss_req, expected_url_get, 'GET')
expected_url_post = "/resource_providers"
expected_data = {'name': 'blazar_compute-1',
'parent_provider_uuid': host_uuid}
self._assert_keystone_called_any(kss_req, expected_url_post, 'POST',
json=expected_data)
@mock.patch('keystoneauth1.session.Session.request')
def test_create_reservation_provider_fail(self, kss_req):
host_name = "compute-1"
get_json_mock = {'resource_providers': []}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(get_json_mock))
self.assertRaises(
exceptions.ResourceProviderNotFound,
self.client.create_reservation_provider, host_name)
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_reservation_provider(self, kss_req):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
get_json_mock = {
'resource_providers': [
{
'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid
}
]
}
mock_call1 = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(get_json_mock))
mock_call2 = fake_requests.FakeResponse(200)
kss_req.side_effect = [mock_call1, mock_call2]
self.client.delete_reservation_provider(host_name)
expected_url_get = "/resource_providers?name=%s" % rp_name
self._assert_keystone_called_any(kss_req, expected_url_get, 'GET')
expected_url_post = "/resource_providers/%s" % rp_uuid
self._assert_keystone_called_any(kss_req, expected_url_post, 'DELETE')
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_reservation_provider_no_rp(self, kss_req):
host_name = "compute-1"
rp_name = "blazar_compute-1"
get_json_mock = {
'resource_providers': []
}
mock_call1 = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(get_json_mock))
mock_call2 = fake_requests.FakeResponse(200)
kss_req.side_effect = [mock_call1, mock_call2]
self.client.delete_reservation_provider(host_name)
expected_url_get = "/resource_providers?name=%s" % rp_name
self._assert_keystone_called_any(kss_req, expected_url_get, 'GET')
# Ensure that mock_call2 for delete is not called
        self.assertEqual(1, kss_req.call_count)
@mock.patch('keystoneauth1.session.Session.request')
def test_create_reservation_class(self, kss_req):
rc_name = 'abc-def'
kss_req.return_value = fake_requests.FakeResponse(200)
self.client.create_reservation_class(rc_name)
expected_url = '/resource_classes'
expected_data = {'name': 'CUSTOM_RESERVATION_ABC_DEF'}
self._assert_keystone_called_once(kss_req, expected_url, 'POST',
json=expected_data)
@mock.patch('keystoneauth1.session.Session.request')
def test_create_reservation_class_fail(self, kss_req):
rc_name = 'abc-def'
kss_req.return_value = fake_requests.FakeResponse(400)
self.assertRaises(
exceptions.ResourceClassCreationFailed,
self.client.create_reservation_class, rc_name)
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_reservation_class(self, kss_req):
rc_name = 'abc-def'
kss_req.return_value = fake_requests.FakeResponse(200)
self.client.delete_reservation_class(rc_name)
expected_url = '/resource_classes/CUSTOM_RESERVATION_ABC_DEF'
self._assert_keystone_called_once(kss_req, expected_url, 'DELETE')
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_reservation_class_fail(self, kss_req):
rc_name = 'abc-def'
# If no reservation class found, the placement API returns 404 error.
kss_req.return_value = fake_requests.FakeResponse(404)
# Ensure that no error is raised
self.client.delete_reservation_class(rc_name)
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get_resource_provider')
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get')
@mock.patch('keystoneauth1.session.Session.request')
def test_update_reservation_inventory(self, kss_req, client_get, get_rp):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
# Build the mock of current resource provider
mock_get_rp_json = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid}
get_rp.return_value = mock_get_rp_json
# Build the mock of "current" inventory for get_inventory()
curr_gen = 11
mock_get_inv_json = {
'inventories': {
'CUSTOM_RESERVATION_CURR': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 1
},
},
"resource_provider_generation": curr_gen
}
client_get.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_get_inv_json))
# Build the mock of "updated" inventory for update_inventory()
update_gen = 12
mock_put_json = {
'inventories': {
'CUSTOM_RESERVATION_CURR': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 1
},
'CUSTOM_RESERVATION_ADD': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 3
},
},
"resource_provider_generation": update_gen
}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_put_json))
result = self.client.update_reservation_inventory(host_name, 'add', 3)
expected_data = {
'inventories': {
'CUSTOM_RESERVATION_CURR': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 1
},
'CUSTOM_RESERVATION_ADD': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 3
},
},
"resource_provider_generation": curr_gen
}
expected_url = '/resource_providers/%s/inventories' % rp_uuid
self._assert_keystone_called_once(kss_req, expected_url, 'PUT',
json=expected_data)
self.assertEqual(mock_put_json, result)
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get_resource_provider')
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get')
@mock.patch('keystoneauth1.session.Session.request')
def test_add_reservation_inventory(self, kss_req, client_get, get_rp):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
# Build the mock of current resource provider
mock_get_rp_json = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid}
get_rp.return_value = mock_get_rp_json
# Build the mock of "current" inventory for get_inventory()
curr_gen = 11
mock_get_inv_json = {
'inventories': {
'CUSTOM_RESERVATION_CURR': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 1
},
},
"resource_provider_generation": curr_gen
}
client_get.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_get_inv_json))
# Build the mock of "updated" inventory for update_inventory()
update_gen = 12
mock_put_json = {
'inventories': {
'CUSTOM_RESERVATION_CURR': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 3
},
},
"resource_provider_generation": update_gen
}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_put_json))
result = self.client.update_reservation_inventory(
host_name, 'curr', 2, additional=True)
expected_data = {
'inventories': {
'CUSTOM_RESERVATION_CURR': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 3
}
},
"resource_provider_generation": curr_gen
}
expected_url = '/resource_providers/%s/inventories' % rp_uuid
self._assert_keystone_called_once(kss_req, expected_url, 'PUT',
json=expected_data)
self.assertEqual(mock_put_json, result)
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get_resource_provider')
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.create_reservation_provider')
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get')
@mock.patch('keystoneauth1.session.Session.request')
def test_update_reservation_inventory_no_rp(
self, kss_req, client_get, create_rp, get_rp):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
# Build the mock that there is no existing reservation provider
get_rp.return_value = None
# Build the mock of created resource provider
mock_post_rp_json = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid}
create_rp.return_value = mock_post_rp_json
# Build the mock of "current" inventory for get_inventory()
curr_gen = 0
mock_get_inv_json = {
'inventories': {},
"resource_provider_generation": curr_gen
}
client_get.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_get_inv_json))
# Build the mock of "updated" inventory for update_inventory()
update_gen = 1
mock_put_json = {
'inventories': {
'CUSTOM_RESERVATION_ADD': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 3
},
},
"resource_provider_generation": update_gen
}
kss_req.return_value = fake_requests.FakeResponse(
200, content=jsonutils.dump_as_bytes(mock_put_json))
result = self.client.update_reservation_inventory(host_name, 'add', 3)
# Ensure that the create_reservation_provider() is called.
create_rp.assert_called_once_with(host_name)
expected_data = {
'inventories': {
'CUSTOM_RESERVATION_ADD': {
"allocation_ratio": 1.0,
"max_unit": 1,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 3
},
},
"resource_provider_generation": curr_gen
}
expected_url = '/resource_providers/%s/inventories' % rp_uuid
self._assert_keystone_called_once(kss_req, expected_url, 'PUT',
json=expected_data)
self.assertEqual(mock_put_json, result)
kss_req.reset_mock()
# Test retrying on 409 conflict
mock_json_data = {
"errors": [
{"status": 409,
"code": "placement.concurrent_update",
"title": "Conflict"}
]
}
kss_req.return_value = fake_requests.FakeResponse(
409, content=jsonutils.dump_as_bytes(mock_json_data))
self.assertRaises(
exceptions.InventoryConflict,
self.client.update_reservation_inventory, host_name, 'add', 3)
self.assertEqual(5, kss_req.call_count)
kss_req.reset_mock()
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get_resource_provider')
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_reservation_inventory(self, kss_req, get_rp):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
# Build the mock of current resource provider
mock_get_rp_json = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid}
get_rp.return_value = mock_get_rp_json
kss_req.return_value = fake_requests.FakeResponse(200)
self.client.delete_reservation_inventory(host_name, "curr1")
expected_url = ('/resource_providers/%s/inventories'
'/CUSTOM_RESERVATION_CURR1' % rp_uuid)
self._assert_keystone_called_once(kss_req, expected_url, 'DELETE')
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get_resource_provider')
def test_delete_reservation_inventory_no_rp(self, get_rp):
host_name = "compute-1"
# Build the mock that there is no existing reservation provider
get_rp.return_value = None
self.assertRaises(
exceptions.ResourceProviderNotFound,
self.client.delete_reservation_inventory, host_name, "curr1")
@mock.patch('blazar.utils.openstack.placement.'
'BlazarPlacementClient.get_resource_provider')
@mock.patch('keystoneauth1.session.Session.request')
def test_delete_reservation_inventory_no_rc(self, kss_req, get_rp):
host_uuid = uuidutils.generate_uuid()
host_name = "compute-1"
rp_uuid = uuidutils.generate_uuid()
rp_name = "blazar_compute-1"
# Build the mock of current resource provider
mock_get_rp_json = {'uuid': rp_uuid,
'name': rp_name,
'generation': 0,
'parent_provider_uuid': host_uuid}
get_rp.return_value = mock_get_rp_json
# If no reservation class found or if no inventory found,
# then the placement API returns 404 error.
kss_req.return_value = fake_requests.FakeResponse(404)
# Ensure that no error is raised
self.client.delete_reservation_inventory(host_name, "curr1")
|
|
import operator
import sys
from spec import Spec, skip, eq_, ok_, raises, assert_raises
from invoke.collection import Collection
from invoke.tasks import task, Task
from invoke.vendor import six
from invoke.vendor.six.moves import reduce
from _utils import load, support_path
@task
def _mytask():
six.print_("woo!")
def _func():
pass
class Collection_(Spec):
class init:
"__init__"
def can_accept_task_varargs(self):
"can accept tasks as *args"
@task
def task1():
pass
@task
def task2():
pass
c = Collection(task1, task2)
assert 'task1' in c
assert 'task2' in c
def can_accept_collections_as_varargs_too(self):
sub = Collection('sub')
ns = Collection(sub)
eq_(ns.collections['sub'], sub)
def kwargs_act_as_name_args_for_given_objects(self):
sub = Collection()
@task
def task1():
pass
ns = Collection(loltask=task1, notsub=sub)
eq_(ns['loltask'], task1)
eq_(ns.collections['notsub'], sub)
def initial_string_arg_acts_as_name(self):
sub = Collection('sub')
ns = Collection(sub)
eq_(ns.collections['sub'], sub)
def initial_string_arg_meshes_with_varargs_and_kwargs(self):
@task
def task1():
pass
@task
def task2():
pass
sub = Collection('sub')
ns = Collection('root', task1, sub, sometask=task2)
for x, y in (
(ns.name, 'root'),
(ns['task1'], task1),
(ns.collections['sub'], sub),
(ns['sometask'], task2),
):
eq_(x, y)
def accepts_load_path_kwarg(self):
eq_(Collection().loaded_from, None)
eq_(Collection(loaded_from='a/path').loaded_from, 'a/path')
class useful_special_methods:
def _meh(self):
@task
def task1():
pass
@task
def task2():
pass
return Collection('meh', task1=task1, task2=task2)
def setup(self):
self.c = self._meh()
def repr_(self):
"__repr__"
eq_(repr(self.c), "<Collection 'meh': task1, task2>")
def equality_should_be_useful(self):
eq_(self.c, self._meh())
class from_module:
def setup(self):
self.c = Collection.from_module(load('integration'))
class parameters:
def setup(self):
self.mod = load('integration')
self.fm = Collection.from_module
def name_override(self):
eq_(self.fm(self.mod).name, 'integration')
eq_(
self.fm(self.mod, name='not-integration').name,
'not-integration'
)
def inline_configuration(self):
# No configuration given, none gotten
eq_(self.fm(self.mod).configuration(), {})
# Config kwarg given is reflected when config obtained
eq_(
self.fm(self.mod, config={'foo': 'bar'}).configuration(),
{'foo': 'bar'}
)
def name_and_config_simultaneously(self):
# Test w/ posargs to enforce ordering, just for safety.
c = self.fm(self.mod, 'the name', {'the': 'config'})
eq_(c.name, 'the name')
eq_(c.configuration(), {'the': 'config'})
def adds_tasks(self):
assert 'print_foo' in self.c
def derives_collection_name_from_module_name(self):
eq_(self.c.name, 'integration')
def submodule_names_are_stripped_to_last_chunk(self):
with support_path():
from package import module
c = Collection.from_module(module)
eq_(module.__name__, 'package.module')
eq_(c.name, 'module')
assert 'mytask' in c # Sanity
def honors_explicit_collections(self):
coll = Collection.from_module(load('explicit_root'))
assert 'top_level' in coll.tasks
assert 'sub' in coll.collections
# The real key test
assert 'sub_task' not in coll.tasks
def allows_tasks_with_explicit_names_to_override_bound_name(self):
coll = Collection.from_module(load('subcollection_task_name'))
assert 'explicit_name' in coll.tasks # not 'implicit_name'
def returns_unique_Collection_objects_for_same_input_module(self):
# Ignoring self.c for now, just in case it changes later.
# First, a module with no root NS
mod = load('integration')
c1 = Collection.from_module(mod)
c2 = Collection.from_module(mod)
assert c1 is not c2
# Now one *with* a root NS (which was previously buggy)
mod2 = load('explicit_root')
c3 = Collection.from_module(mod2)
c4 = Collection.from_module(mod2)
assert c3 is not c4
class explicit_root_ns:
def setup(self):
mod = load('explicit_root')
mod.ns.configure({
'key': 'builtin',
'otherkey': 'yup',
'subconfig': {'mykey': 'myvalue'}
})
mod.ns.name = 'builtin_name'
self.unchanged = Collection.from_module(mod)
self.changed = Collection.from_module(
mod,
name='override_name',
config={
'key': 'override',
'subconfig': {'myotherkey': 'myothervalue'}
}
)
def inline_config_with_root_namespaces_overrides_builtin(self):
eq_(self.unchanged.configuration()['key'], 'builtin')
eq_(self.changed.configuration()['key'], 'override')
def inline_config_overrides_via_merge_not_replacement(self):
ok_('otherkey' in self.changed.configuration())
def config_override_merges_recursively(self):
eq_(
self.changed.configuration()['subconfig']['mykey'],
'myvalue'
)
def inline_name_overrides_root_namespace_object_name(self):
eq_(self.unchanged.name, 'builtin_name')
eq_(self.changed.name, 'override_name')
def root_namespace_object_name_overrides_module_name(self):
# Duplicates part of previous test for explicitness' sake.
# I.e. proves that the name doesn't end up 'explicit_root'.
eq_(self.unchanged.name, 'builtin_name')
class add_task:
def setup(self):
self.c = Collection()
def associates_given_callable_with_given_name(self):
self.c.add_task(_mytask, 'foo')
eq_(self.c['foo'], _mytask)
def uses_function_name_as_implicit_name(self):
self.c.add_task(_mytask)
assert '_mytask' in self.c
def prefers_name_kwarg_over_task_name_attr(self):
self.c.add_task(Task(_func, name='notfunc'), name='yesfunc')
assert 'yesfunc' in self.c
assert 'notfunc' not in self.c
def prefers_task_name_attr_over_function_name(self):
self.c.add_task(Task(_func, name='notfunc'))
assert 'notfunc' in self.c
assert '_func' not in self.c
@raises(ValueError)
def raises_ValueError_if_no_name_found(self):
# Can't use a lambda here as they are technically real functions.
class Callable(object):
def __call__(self):
pass
self.c.add_task(Task(Callable()))
@raises(ValueError)
def raises_ValueError_on_multiple_defaults(self):
t1 = Task(_func, default=True)
t2 = Task(_func, default=True)
self.c.add_task(t1, 'foo')
self.c.add_task(t2, 'bar')
@raises(ValueError)
def raises_ValueError_if_task_added_mirrors_subcollection_name(self):
self.c.add_collection(Collection('sub'))
self.c.add_task(_mytask, 'sub')
def allows_specifying_task_defaultness(self):
self.c.add_task(_mytask, default=True)
eq_(self.c.default, '_mytask')
def specifying_default_False_overrides_task_setting(self):
@task(default=True)
def its_me():
pass
self.c.add_task(its_me, default=False)
eq_(self.c.default, None)
class add_collection:
def setup(self):
self.c = Collection()
def adds_collection_as_subcollection_of_self(self):
c2 = Collection('foo')
self.c.add_collection(c2)
assert 'foo' in self.c.collections
def can_take_module_objects(self):
self.c.add_collection(load('integration'))
assert 'integration' in self.c.collections
@raises(ValueError)
def raises_ValueError_if_collection_without_name(self):
# Aka non-root collections must either have an explicit name given
# via kwarg, have a name attribute set, or be a module with
# __name__ defined.
root = Collection()
sub = Collection()
root.add_collection(sub)
@raises(ValueError)
def raises_ValueError_if_collection_named_same_as_task(self):
self.c.add_task(_mytask, 'sub')
self.c.add_collection(Collection('sub'))
class getitem:
"__getitem__"
def setup(self):
self.c = Collection()
def finds_own_tasks_by_name(self):
# TODO: duplicates an add_task test above, fix?
self.c.add_task(_mytask, 'foo')
eq_(self.c['foo'], _mytask)
def finds_subcollection_tasks_by_dotted_name(self):
sub = Collection('sub')
sub.add_task(_mytask)
self.c.add_collection(sub)
eq_(self.c['sub._mytask'], _mytask)
def honors_aliases_in_own_tasks(self):
t = Task(_func, aliases=['bar'])
self.c.add_task(t, 'foo')
eq_(self.c['bar'], t)
def honors_subcollection_task_aliases(self):
self.c.add_collection(load('decorator'))
assert 'decorator.bar' in self.c
def honors_own_default_task_with_no_args(self):
t = Task(_func, default=True)
self.c.add_task(t)
eq_(self.c[''], t)
def honors_subcollection_default_tasks_on_subcollection_name(self):
sub = Collection.from_module(load('decorator'))
self.c.add_collection(sub)
# Sanity
assert self.c['decorator.biz'] is sub['biz']
# Real test
assert self.c['decorator'] is self.c['decorator.biz']
@raises(ValueError)
def raises_ValueError_for_no_name_and_no_default(self):
self.c['']
@raises(ValueError)
def ValueError_for_empty_subcol_task_name_and_no_default(self):
self.c.add_collection(Collection('whatever'))
self.c['whatever']
class to_contexts:
def setup(self):
@task
def mytask(text, boolean=False, number=5):
six.print_(text)
@task(aliases=['mytask27'])
def mytask2():
pass
@task(aliases=['othertask'], default=True)
def subtask():
pass
sub = Collection('sub', subtask)
self.c = Collection(mytask, mytask2, sub)
self.contexts = self.c.to_contexts()
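            # flatten each context's alias tuple into a single list for the tests below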
alias_tups = [list(x.aliases) for x in self.contexts]
self.aliases = reduce(operator.add, alias_tups, [])
# Focus on 'mytask' as it has the more interesting sig
self.context = [x for x in self.contexts if x.name == 'mytask'][0]
def returns_iterable_of_Contexts_corresponding_to_tasks(self):
eq_(self.context.name, 'mytask')
eq_(len(self.contexts), 3)
def allows_flaglike_access_via_flags(self):
assert '--text' in self.context.flags
def positional_arglist_preserves_order_given(self):
@task(positional=('second', 'first'))
def mytask(first, second, third):
pass
c = Collection()
c.add_task(mytask)
ctx = c.to_contexts()[0]
eq_(ctx.positional_args, [ctx.args['second'], ctx.args['first']])
def exposes_namespaced_task_names(self):
assert 'sub.subtask' in [x.name for x in self.contexts]
def exposes_namespaced_task_aliases(self):
assert 'sub.othertask' in self.aliases
def exposes_subcollection_default_tasks(self):
assert 'sub' in self.aliases
def exposes_aliases(self):
assert 'mytask27' in self.aliases
class task_names:
def setup(self):
self.c = Collection.from_module(load('explicit_root'))
def returns_all_task_names_including_subtasks(self):
eq_(set(self.c.task_names.keys()), set(['top_level', 'sub.sub_task']))
def includes_aliases_and_defaults_as_values(self):
names = self.c.task_names
eq_(names['top_level'], ['othertop'])
eq_(names['sub.sub_task'], ['sub.othersub', 'sub'])
class configuration:
"Configuration methods"
def setup(self):
self.root = Collection()
self.task = Task(_func, name='task')
def basic_set_and_get(self):
self.root.configure({'foo': 'bar'})
eq_(self.root.configuration(), {'foo': 'bar'})
def configure_performs_merging(self):
self.root.configure({'foo': 'bar'})
eq_(self.root.configuration()['foo'], 'bar')
self.root.configure({'biz': 'baz'})
eq_(set(self.root.configuration().keys()), set(['foo', 'biz']))
def configure_merging_is_recursive_for_nested_dicts(self):
self.root.configure({'foo': 'bar', 'biz': {'baz': 'boz'}})
self.root.configure({'biz': {'otherbaz': 'otherboz'}})
c = self.root.configuration()
eq_(c['biz']['baz'], 'boz')
eq_(c['biz']['otherbaz'], 'otherboz')
def configure_allows_overwriting(self):
self.root.configure({'foo': 'one'})
eq_(self.root.configuration()['foo'], 'one')
self.root.configure({'foo': 'two'})
eq_(self.root.configuration()['foo'], 'two')
def call_returns_dict(self):
eq_(self.root.configuration(), {})
self.root.configure({'foo': 'bar'})
eq_(self.root.configuration(), {'foo': 'bar'})
def access_merges_from_subcollections(self):
inner = Collection('inner', self.task)
inner.configure({'foo': 'bar'})
self.root.configure({'biz': 'baz'})
# With no inner collection
eq_(set(self.root.configuration().keys()), set(['biz']))
# With inner collection
self.root.add_collection(inner)
eq_(
set(self.root.configuration('inner.task').keys()),
set(['foo', 'biz'])
)
def parents_overwrite_children_in_path(self):
inner = Collection('inner', self.task)
inner.configure({'foo': 'inner'})
self.root.add_collection(inner)
# Before updating root collection's config, reflects inner
eq_(self.root.configuration('inner.task')['foo'], 'inner')
self.root.configure({'foo': 'outer'})
# After, reflects outer (since that now overrides)
eq_(self.root.configuration('inner.task')['foo'], 'outer')
def sibling_subcollections_ignored(self):
inner = Collection('inner', self.task)
inner.configure({'foo': 'hi there'})
inner2 = Collection('inner2', Task(_func, name='task2'))
inner2.configure({'foo': 'nope'})
root = Collection(inner, inner2)
eq_(root.configuration('inner.task')['foo'], 'hi there')
eq_(root.configuration('inner2.task2')['foo'], 'nope')
def subcollection_paths_may_be_dotted(self):
leaf = Collection('leaf', self.task)
leaf.configure({'key': 'leaf-value'})
middle = Collection('middle', leaf)
root = Collection('root', middle)
eq_(root.configuration('middle.leaf.task'), {'key': 'leaf-value'})
def invalid_subcollection_paths_result_in_KeyError(self):
# Straight up invalid
assert_raises(KeyError,
Collection('meh').configuration,
'nope.task'
)
# Exists but wrong level (should be 'root.task', not just
# 'task')
inner = Collection('inner', self.task)
assert_raises(KeyError,
Collection('root', inner).configuration, 'task')
def keys_dont_have_to_exist_in_full_path(self):
# Kinda duplicates earlier stuff; meh
# Key only stored on leaf
leaf = Collection('leaf', self.task)
leaf.configure({'key': 'leaf-value'})
middle = Collection('middle', leaf)
root = Collection('root', middle)
eq_(root.configuration('middle.leaf.task'), {'key': 'leaf-value'})
# Key stored on mid + leaf but not root
middle.configure({'key': 'whoa'})
eq_(root.configuration('middle.leaf.task'), {'key': 'whoa'})
|
|
"""Subcluster core module"""
__author__ = "Jens Thomas, and Felix Simkovic"
__date__ = "01 Oct 2016"
__version__ = "1.0"
from collections import namedtuple
import itertools
import logging
import mmtbx.superpose
import numpy
import re
import os
import shutil
from ample.util import ample_util, pdb_edit
logger = logging.getLogger()
SCORE_MATRIX_NAME = 'score.matrix'
FILE_LIST_NAME = 'files.list'
RMSD_MAX = 50
QSCORE_MIN = 0.01
class SubClusterer(object):
"""Base class for clustering pdbs by distance
    Sub-classes just need to provide a generate_distance_matrix method
"""
def __init__(self, executable=None, nproc=1):
        if executable and not (os.path.exists(executable) and os.access(executable, os.X_OK)):
raise RuntimeError("Cannot find subclusterer executable: {0}".format(executable))
self.executable = executable
self.nproc = nproc
self.distance_matrix = None
self.index2pdb = []
self.cluster_score = None
def generate_distance_matrix(self, *args, **kwargs):
raise NotImplementedError
def cluster_by_radius(self, radius):
"""Return a list of pdbs clustered by the given radius"""
if self.distance_matrix is None:
raise RuntimeError("Need to call generate_distance_matrix before cluster_by_radius!")
cluster_indices, cluster_score = self._cluster_indices(radius)
self.cluster_score = cluster_score
if cluster_indices:
return [self.index2pdb[i] for i in cluster_indices]
else:
return None
def _cluster_indices(self, thresh):
"""Return the indices of the largest cluster that have distances < thresh.
We loop through each row of the distance matrix and for each row (pdb) see
how many pdbs are < thresh to this pdb. We return the largest cluster.
"""
# self.dump_matrix("maxcluster.csv")
thresh = float(thresh)
# get mask of all elements where condition is true. We exclude 0.0 to ensure we don't get the
# index of the model that the row is compared with, as this needs to be the first model in the
# ensemble. This means we would also exclude models that had an rmsd of zero to the centroid, but
        # as these are likely to be identical (and this occurrence is rare), this should be ok
condition = numpy.logical_and(self.distance_matrix <= thresh, self.distance_matrix != 0.0)
# Array of sums of each row - largest number is a row where most items satisfy condition
condition_sum = condition.sum(axis=1)
# Find all rows that have the maximum of the condition true and then select the first one
row_index = numpy.where(condition_sum == numpy.max(condition_sum))[0][0]
# Select all values from that row where the condition is true and insert the first index so that
# it becomes the centroid of that cluster
max_cluster = numpy.insert(numpy.where(condition[row_index])[0], 0, row_index)
if len(max_cluster) == 1:
return None, None
else:
cluster_score = self.calculate_score(max_cluster)
return sorted(max_cluster), cluster_score
def calculate_score(self, cluster):
"""Given a list of indices of a cluster, calculate the rmsd we want to give to phaser
"""
ALL_BY_ALL = True
if ALL_BY_ALL:
            rmsds = [self.distance_matrix[i, j] for i, j in itertools.combinations(cluster, 2)]
else:
            # Just use the rmsds of the decoys to the cluster centroid - assumes
# the centroid approximates the native
row = cluster[0]
rmsds = [self.distance_matrix[row][j] for j in cluster[1:]]
return max(rmsds)
def dump_raw_matrix(self, file_name):
with open(file_name, 'w') as f:
for row in self.distance_matrix:
f.write(",".join(map(str, row)) + "\n")
f.write("\n")
return
def dump_pdb_matrix(self, file_name=SCORE_MATRIX_NAME, offset=0):
with open(file_name, 'w') as f:
l = len(self.distance_matrix) + offset
for i in range(offset, l):
for j in range(i, l):
f.write(
"{0: > 4d} {1: > 4d} {2: > 8.3F}\n".format(i, j, self.distance_matrix[i - offset][j - offset])
)
f.write("\n")
return os.path.abspath(file_name)
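# A minimal sketch (toy data, not used by the module) of the row-mask logic in
# SubClusterer._cluster_indices: rows of the distance matrix are masked by the
# threshold and the row with the most neighbours becomes the cluster centroid.
def _demo_cluster_indices():
    m = numpy.array([[0.0, 1.0, 9.0],
                     [1.0, 0.0, 9.0],
                     [9.0, 9.0, 0.0]])
    condition = numpy.logical_and(m <= 2.0, m != 0.0)
    condition_sum = condition.sum(axis=1)
    row_index = numpy.where(condition_sum == numpy.max(condition_sum))[0][0]
    cluster = numpy.insert(numpy.where(condition[row_index])[0], 0, row_index)
    assert sorted(cluster) == [0, 1]  # models 0 and 1 cluster; model 2 is too far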
class CctbxClusterer(SubClusterer):
"""Class to cluster files with CCTBX"""
def generate_distance_matrix(self, pdb_list):
"""Run cctbx to generate the distance distance_matrix"""
num_models = len(pdb_list)
if not num_models:
raise RuntimeError("generate_distance_matrix got empty pdb_list!")
        # Index is just the order of the pdb in the sorted list
        pdb_list = sorted(pdb_list)
        self.index2pdb = pdb_list
# Create a square matrix storing the rmsd distances between models
self.distance_matrix = numpy.zeros([num_models, num_models])
for m1, m2 in itertools.combinations(pdb_list, 2):
i, j = pdb_list.index(m1), pdb_list.index(m2)
fixed = mmtbx.superpose.SuperposePDB(m1, preset='ca', log=None, quiet=True)
moving = mmtbx.superpose.SuperposePDB(m2, preset='ca', log=None, quiet=True)
rmsd, _ = moving.superpose(fixed)
self.distance_matrix[i, j] = self.distance_matrix[j, i] = float(rmsd)
# # Might be deleted when confirmed that above code works
# for i, m1 in enumerate(pdb_list):
# fixed = mmtbx.superpose.SuperposePDB(m1, preset='ca', log=None, quiet=True)
# for j, m2 in enumerate(pdb_list):
# if j <= i: continue
# moving = mmtbx.superpose.SuperposePDB(m2, preset='ca', log=None, quiet=True)
# rmsd, _ = moving.superpose(fixed)
# self.distance_matrix[i][j] = float(rmsd)
# # Copy in other half of matrix - we use a full matrix as it's easier to scan for clusters
# for x in range(len(self.distance_matrix)):
# for y in range(len(self.distance_matrix)):
# self.distance_matrix[y][x] = self.distance_matrix[x][y]
return
class FpcClusterer(SubClusterer):
"""Class to cluster files with fast_protein_clusterer"""
def generate_distance_matrix(self, pdb_list):
        # Index is just the order of the pdb in the sorted file list
        pdb_list = sorted(pdb_list)
        self.index2pdb = pdb_list
        # Create list of pdb files
        fname = os.path.join(os.getcwd(), FILE_LIST_NAME)
        with open(fname, 'w') as f:
            f.write("\n".join(pdb_list) + "\n")
# Run fast_protein_cluster - this is just to generate the distance matrix, but there
# doesn't seem to be a way to stop it clustering as well - not a problem as it just
# generates more files
log_name = os.path.abspath("fast_protein_cluster.log")
matrix_file = "fpc.matrix"
cmd = [self.executable, "--cluster_write_text_matrix", matrix_file, "-i", fname]
retcode = ample_util.run_command(cmd, logfile=log_name)
if retcode != 0:
raise RuntimeError(
"non-zero return code for fast_protein_cluster in generate_distance_matrix!\nCheck logfile:{0}".format(
log_name
)
)
mlen = 0
data = []
with open(matrix_file) as f:
for l in f:
l = l.strip().split()
x = int(l[0])
y = int(l[1])
d = float(l[2])
mlen = max(mlen, x + 1) # +1 as we want the length
data.append((x, y, d))
        # create an empty matrix initialised with zeros, so unfilled entries
        # read as 0.0 when scanning through the matrix
        # use a square matrix to make indexing easier as we're unlikely to be very big
m = numpy.zeros([mlen, mlen])
# Fill in all values (upper triangle)
for i, j, d in data:
if i > j:
m[j][i] = d
else:
m[i][j] = d
# Copy to lower
for x in range(mlen):
for y in range(mlen):
if x == y:
continue
m[y][x] = m[x][y]
self.distance_matrix = m
return
class GesamtClusterer(SubClusterer):
"""Class to cluster files with Gesamt"""
def generate_distance_matrix(self, pdb_list, purge=False):
if True:
self._generate_pairwise_rmsd_matrix(pdb_list, purge=purge)
else:
            self._generate_distance_matrix_generic(pdb_list, purge=purge, purge_all=False, metric='qscore')
return
def _generate_pairwise_rmsd_matrix(self, models, purge=False):
"""
Use gesamt to generate an all-by-all pairwise rmsd matrix of a list of pdb models
Notes:
gesamt -input-list inp_list.dat -sheaf-x
where inp_list.dat contains:
1ADZ.pdb -s /1/A
1ADZ.pdb -s /2/A
1ADZ.pdb -s /3/A
"""
# Index is just the order of the pdbs
models = sorted(models)
self.index2pdb = models
# Create file with list of pdbs and model/chain
glist = 'gesamt_models.dat'
with open(glist, 'w') as w:
for m in models:
w.write("{0} -s /1/A \n".format(m))
w.write('\n')
cmd = [self.executable, '-input-list', glist, '-sheaf-x', '-nthreads={0}'.format(self.nproc)]
logfile = os.path.abspath('gesamt_archive.log')
rtn = ample_util.run_command(cmd, logfile)
if rtn != 0:
raise RuntimeError("Error running gesamt - check logfile: {0}".format(logfile))
# Create a square distance_matrix no_models in size filled with None
num_models = len(models)
self.distance_matrix = numpy.zeros([num_models, num_models])
# Read in the rmsds calculated
self._parse_gesamt_rmsd_log(logfile, num_models)
if purge:
os.unlink(glist)
os.unlink(logfile)
return
def _parse_gesamt_rmsd_log(self, logfile, num_models):
reading = -1
nmodel = 0
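        # reading: -1 = outside the CROSS-RMSDs block, 0 = skip the blank line
        # after the header, 1 = consume one row of RMSDs per model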
with open(logfile) as f:
for line in f:
if line.startswith(' ===== CROSS-RMSDs') or reading == 0:
# find start of RMSDS and skip blank line
reading += 1
continue
if reading == 1:
fields = line.strip().split('|')
nmodel = int(fields[0])
rmsd_txt = fields[2].strip()
# poke into distance matrix
rmsds = [float(r) for r in rmsd_txt.split()]
for j in range(len(rmsds)):
if j == nmodel:
continue
self.distance_matrix[nmodel - 1][j] = rmsds[j]
if nmodel == num_models:
reading = -1
if nmodel != num_models:
raise RuntimeError("Could not generate distance matrix with gesamt")
return
def _generate_distance_matrix_generic(self, models, purge=True, purge_all=False, metric='qscore'):
        # Make sure all the files are in the same directory otherwise this won't work
mdir = os.path.dirname(models[0])
if not all([os.path.dirname(p) == mdir for p in models]):
raise RuntimeError("All pdb files are not in the same directory!")
models = sorted(models)
self.index2pdb = models
nmodels = len(models)
# Create list of pdb files
fname = os.path.join(os.getcwd(), FILE_LIST_NAME)
with open(fname, 'w') as f:
f.write("\n".join(models) + "\n")
# Make the archive
logger.debug("Generating gesamt archive from models in directory %s", mdir)
garchive = 'gesamt.archive'
if not os.path.isdir(garchive):
os.mkdir(garchive)
logfile = os.path.abspath('gesamt_archive.log')
cmd = [self.executable, '--make-archive', garchive, '-pdb', mdir]
# cmd += [ '-nthreads=auto' ]
cmd += ['-nthreads={0}'.format(self.nproc)]
print(" ".join(cmd))
# HACK FOR DYLD!!!!
env = None
# env = {'DYLD_LIBRARY_PATH' : '/opt/ccp4-devtools/install/lib'}
rtn = ample_util.run_command(cmd, logfile, env=env)
if rtn != 0:
raise RuntimeError("Error running gesamt - check logfile: {0}".format(logfile))
if purge_all:
os.unlink(logfile)
# Now loop through each file creating the matrix
if metric == 'rmsd':
parity = 0.0
elif metric == 'qscore':
parity = 1
else:
raise RuntimeError("Unrecognised metric: {0}".format(metric))
# m = [[parity for _ in range(nmodels)] for _ in range(nmodels)]
        m = numpy.full([nmodels, nmodels], parity, dtype=float)
for i, model in enumerate(models):
mname = os.path.basename(model)
gesamt_out = '{0}_gesamt.out'.format(mname)
logfile = '{0}_gesamt.log'.format(mname)
cmd = [self.executable, model, '-archive', garchive, '-o', gesamt_out]
cmd += ['-nthreads={0}'.format(self.nproc)]
rtn = ample_util.run_command(cmd, logfile)
if rtn != 0:
raise RuntimeError("Error running gesamt!")
else:
if purge:
os.unlink(logfile)
gdata = self._parse_gesamt_out(gesamt_out)
assert gdata[0].file_name == mname, gdata[0].file_name + " " + mname
score_dict = {g.file_name: (g.rmsd, g.q_score) for g in gdata}
for j in range(i + 1, nmodels):
# Try and get the rmsd and qscore for this model. If it's missing we assume the model was
# too divergent for gesamt to find it and we set the rmsd and qscore to fixed values
model2 = os.path.basename(models[j])
try:
rmsd, qscore = score_dict[model2]
except KeyError:
rmsd = RMSD_MAX
qscore = QSCORE_MIN
if metric == 'rmsd':
score = rmsd
elif metric == 'qscore':
score = qscore
else:
raise RuntimeError("Unrecognised metric: {0}".format(metric))
m[i, j] = score
if purge_all:
os.unlink(gesamt_out)
# Copy upper half of matrix to lower
i_lower = numpy.tril_indices(nmodels, -1)
m[i_lower] = m.T[i_lower] # make the matrix symmetric
self.distance_matrix = m
# Remove the gesamt archive
if purge:
shutil.rmtree(garchive)
# Write out the matrix in a form spicker can use
self.dump_pdb_matrix(SCORE_MATRIX_NAME)
return
def _parse_gesamt_out(self, out_file):
# Assumption is there are no pdb_codes
GesamtData = namedtuple(
'GesamtData', ['count', 'chain_id', 'q_score', 'rmsd', 'seq_id', 'nalign', 'nres', 'file_name']
)
data = []
with open(out_file) as f:
for i, line in enumerate(f):
if i < 2:
continue # First 2 lines are headers
if not line.strip():
continue # ignore blanks
try:
tmp = GesamtData(*line.split())
# Convert from strings to correct types
data.append(
GesamtData(
int(tmp.count),
tmp.chain_id,
float(tmp.q_score),
float(tmp.rmsd),
tmp.seq_id,
int(tmp.nalign),
int(tmp.nres),
os.path.basename(tmp.file_name),
)
)
except Exception as e:
                    raise RuntimeError('Error parsing line {0}: {1}\n{2}'.format(i, line, e))
assert len(data), "Failed to read any data!"
return data
class LsqkabClusterer(SubClusterer):
"""Class to cluster files with Lsqkab"""
def calc_rmsd(self, model1, model2, nresidues=None, logfile='lsqkab.out', purge=False):
if not nresidues:
_, nresidues = pdb_edit.num_atoms_and_residues(model1, first=True)
stdin = """FIT RESIDUE CA 1 TO {0} CHAIN {1}
MATCH 1 to {0} CHAIN {1}
output RMS
end""".format(
nresidues, 'A'
)
cmd = ['lsqkab', 'XYZINM', model1, 'XYZINF', model2]
ample_util.run_command(cmd, logfile=logfile, stdin=stdin)
rmsd = self.parse_lsqkab_output(logfile)
# cleanup
if purge:
os.unlink(logfile)
os.unlink('RMSTAB')
return rmsd
def generate_distance_matrix(self, models):
        # Index is just the order of the pdb in the sorted list
        models = sorted(models)
        self.index2pdb = models
num_models = len(models)
# Assume all models are the same size and only have a single chain
        # We also assume that the chain is called 'A' (hard-coded in calc_rmsd)
_, nresidues = pdb_edit.num_atoms_and_residues(models[0], first=True)
# Create a square distance_matrix no_models in size filled with None
self.distance_matrix = numpy.zeros([num_models, num_models])
logfile = 'lsqkab.out'
parity = 0.0
for i, fixed in enumerate(models):
for j, model2 in enumerate(models):
if j < i:
continue
if j == i:
rmsd = parity
elif j > i:
rmsd = self.calc_rmsd(fixed, model2, nresidues=nresidues, logfile=logfile)
self.distance_matrix[i][j] = rmsd
# Clean up output files from lsqkab
os.unlink(logfile)
os.unlink('RMSTAB')
# Copy in other half of matrix - we use a full matrix as it's easier to scan for clusters
for x in range(len(self.distance_matrix)):
for y in range(len(self.distance_matrix)):
self.distance_matrix[y][x] = self.distance_matrix[x][y]
return
def parse_lsqkab_output(self, output_file):
with open(output_file) as f:
for l in f.readlines():
if l.startswith(" RMS XYZ DISPLACEMENT ="):
return float(l.split()[4])
        raise RuntimeError("Could not parse RMS XYZ DISPLACEMENT from lsqkab output: {0}".format(output_file))
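# A minimal sketch (hypothetical model list, not used by the module) of how the
# clusterers above are driven: build the distance matrix once, then probe
# increasing radii until a cluster is found.
def _demo_cluster_usage(models):
    clusterer = LsqkabClusterer()
    clusterer.generate_distance_matrix(models)
    for radius in (1, 2, 3):
        cluster = clusterer.cluster_by_radius(radius)
        if cluster:
            return radius, cluster
    return None, None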
|
|
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import redirect, get_object_or_404
from django.utils.http import base36_to_int, int_to_base36
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateResponseMixin, View
from django.views.generic.edit import FormView
from django.contrib import auth, messages
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from account import signals
from account.conf import settings
from account.forms import SignupForm, LoginUsernameForm
from account.forms import ChangePasswordForm, PasswordResetForm, PasswordResetTokenForm
from account.forms import SettingsForm
from account.hooks import hookset
from account.mixins import LoginRequiredMixin
from account.models import SignupCode, EmailAddress, EmailConfirmation, Account, AccountDeletion, PasswordHistory
from account.utils import check_password_expired, default_redirect, get_form_data
class PasswordMixin(object):
"""
Mixin handling common elements of password change.
Required attributes in inheriting class:
form_password_field - example: "password"
fallback_url_setting - example: "ACCOUNT_PASSWORD_RESET_REDIRECT_URL"
Required methods in inheriting class:
get_user()
change_password()
after_change_password()
get_redirect_field_name()
"""
redirect_field_name = "next"
messages = {
"password_changed": {
"level": messages.SUCCESS,
"text": _("Password successfully changed.")
}
}
def get_context_data(self, **kwargs):
ctx = super(PasswordMixin, self).get_context_data(**kwargs)
redirect_field_name = self.get_redirect_field_name()
ctx.update({
"redirect_field_name": redirect_field_name,
"redirect_field_value": self.request.POST.get(redirect_field_name, self.request.GET.get(redirect_field_name, "")),
})
return ctx
def change_password(self, form):
user = self.get_user()
user.set_password(form.cleaned_data[self.form_password_field])
user.save()
return user
def after_change_password(self):
user = self.get_user()
signals.password_changed.send(sender=self, user=user)
if settings.ACCOUNT_NOTIFY_ON_PASSWORD_CHANGE:
self.send_password_email(user)
if self.messages.get("password_changed"):
messages.add_message(
self.request,
self.messages["password_changed"]["level"],
self.messages["password_changed"]["text"]
)
def get_redirect_field_name(self):
return self.redirect_field_name
def get_success_url(self, fallback_url=None, **kwargs):
if fallback_url is None:
fallback_url = getattr(settings, self.fallback_url_setting, None)
kwargs.setdefault("redirect_field_name", self.get_redirect_field_name())
return default_redirect(self.request, fallback_url, **kwargs)
def send_password_email(self, user):
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
current_site = get_current_site(self.request)
ctx = {
"user": user,
"protocol": protocol,
"current_site": current_site,
}
hookset.send_password_change_email([user.email], ctx)
def create_password_history(self, form):
if settings.ACCOUNT_PASSWORD_USE_HISTORY:
password = form.cleaned_data[self.form_password_field]
PasswordHistory.objects.create(
user=self.request.user,
password=make_password(password)
)
class SignupView(PasswordMixin, FormView):
template_name = "account/signup.html"
template_name_ajax = "account/ajax/signup.html"
template_name_email_confirmation_sent = "account/email_confirmation_sent.html"
template_name_email_confirmation_sent_ajax = "account/ajax/email_confirmation_sent.html"
template_name_signup_closed = "account/signup_closed.html"
template_name_signup_closed_ajax = "account/ajax/signup_closed.html"
form_class = SignupForm
form_kwargs = {}
form_password_field = "password"
redirect_field_name = "next"
identifier_field = "username"
messages = {
"email_confirmation_sent": {
"level": messages.INFO,
"text": _("Confirmation email sent to {email}.")
},
"invalid_signup_code": {
"level": messages.WARNING,
"text": _("The code {code} is invalid.")
}
}
fallback_url_setting = "ACCOUNT_SIGNUP_REDIRECT_URL"
def __init__(self, *args, **kwargs):
self.created_user = None
kwargs["signup_code"] = None
super(SignupView, self).__init__(*args, **kwargs)
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
self.setup_signup_code()
return super(SignupView, self).dispatch(request, *args, **kwargs)
def setup_signup_code(self):
code = self.get_code()
if code:
try:
self.signup_code = SignupCode.check_code(code)
except SignupCode.InvalidCode:
self.signup_code = None
self.signup_code_present = True
else:
self.signup_code = None
self.signup_code_present = False
def get(self, *args, **kwargs):
if self.request.user.is_authenticated():
return redirect(default_redirect(self.request, settings.ACCOUNT_LOGIN_REDIRECT_URL))
if not self.is_open():
return self.closed()
return super(SignupView, self).get(*args, **kwargs)
def post(self, *args, **kwargs):
if self.request.user.is_authenticated():
raise Http404()
if not self.is_open():
return self.closed()
return super(SignupView, self).post(*args, **kwargs)
def get_initial(self):
initial = super(SignupView, self).get_initial()
if self.signup_code:
initial["code"] = self.signup_code.code
if self.signup_code.email:
initial["email"] = self.signup_code.email
return initial
def get_template_names(self):
if self.request.is_ajax():
return [self.template_name_ajax]
else:
return [self.template_name]
def get_form_kwargs(self):
kwargs = super(SignupView, self).get_form_kwargs()
kwargs.update(self.form_kwargs)
return kwargs
def form_invalid(self, form):
signals.user_sign_up_attempt.send(
sender=SignupForm,
username=get_form_data(form, self.identifier_field),
email=get_form_data(form, "email"),
result=form.is_valid()
)
return super(SignupView, self).form_invalid(form)
def form_valid(self, form):
self.created_user = self.create_user(form, commit=False)
# prevent User post_save signal from creating an Account instance
        # we want to handle that ourselves.
self.created_user._disable_account_creation = True
self.created_user.save()
self.use_signup_code(self.created_user)
email_address = self.create_email_address(form)
if settings.ACCOUNT_EMAIL_CONFIRMATION_REQUIRED and not email_address.verified:
self.created_user.is_active = False
self.created_user.save()
self.create_account(form)
self.create_password_history(form)
self.after_signup(form)
if settings.ACCOUNT_EMAIL_CONFIRMATION_EMAIL and not email_address.verified:
self.send_email_confirmation(email_address)
if settings.ACCOUNT_EMAIL_CONFIRMATION_REQUIRED and not email_address.verified:
return self.email_confirmation_required_response()
else:
show_message = [
settings.ACCOUNT_EMAIL_CONFIRMATION_EMAIL,
self.messages.get("email_confirmation_sent"),
not email_address.verified
]
if all(show_message):
messages.add_message(
self.request,
self.messages["email_confirmation_sent"]["level"],
self.messages["email_confirmation_sent"]["text"].format(**{
"email": form.cleaned_data["email"]
})
)
# attach form to self to maintain compatibility with login_user
# API. this should only be relied on by d-u-a and it is not a stable
# API for site developers.
self.form = form
self.login_user()
return redirect(self.get_success_url())
def create_user(self, form, commit=True, model=None, **kwargs):
User = model
if User is None:
User = get_user_model()
user = User(**kwargs)
username = form.cleaned_data.get("username")
if username is None:
username = self.generate_username(form)
user.username = username
user.email = form.cleaned_data["email"].strip()
password = form.cleaned_data.get("password")
if password:
user.set_password(password)
else:
user.set_unusable_password()
if commit:
user.save()
return user
def create_account(self, form):
return Account.create(request=self.request, user=self.created_user, create_email=False)
def generate_username(self, form):
raise NotImplementedError(
"Unable to generate username by default. "
"Override SignupView.generate_username in a subclass."
)
def create_email_address(self, form, **kwargs):
kwargs.setdefault("primary", True)
kwargs.setdefault("verified", False)
if self.signup_code:
kwargs["verified"] = self.created_user.email == self.signup_code.email if self.signup_code.email else False
return EmailAddress.objects.add_email(self.created_user, self.created_user.email, **kwargs)
def use_signup_code(self, user):
if self.signup_code:
self.signup_code.use(user)
def send_email_confirmation(self, email_address):
email_address.send_confirmation(site=get_current_site(self.request))
def after_signup(self, form):
signals.user_signed_up.send(sender=SignupForm, user=self.created_user, form=form)
def login_user(self):
user = auth.authenticate(**self.user_credentials())
auth.login(self.request, user)
self.request.session.set_expiry(0)
def user_credentials(self):
return hookset.get_user_credentials(self.form, self.identifier_field)
def get_code(self):
return self.request.POST.get("code", self.request.GET.get("code"))
def is_open(self):
if self.signup_code:
return True
else:
if self.signup_code_present:
if self.messages.get("invalid_signup_code"):
messages.add_message(
self.request,
self.messages["invalid_signup_code"]["level"],
self.messages["invalid_signup_code"]["text"].format(**{
"code": self.get_code(),
})
)
return settings.ACCOUNT_OPEN_SIGNUP
def email_confirmation_required_response(self):
if self.request.is_ajax():
template_name = self.template_name_email_confirmation_sent_ajax
else:
template_name = self.template_name_email_confirmation_sent
response_kwargs = {
"request": self.request,
"template": template_name,
"context": {
"email": self.created_user.email,
"success_url": self.get_success_url(),
}
}
return self.response_class(**response_kwargs)
def closed(self):
if self.request.is_ajax():
template_name = self.template_name_signup_closed_ajax
else:
template_name = self.template_name_signup_closed
response_kwargs = {
"request": self.request,
"template": template_name,
}
return self.response_class(**response_kwargs)
class LoginView(FormView):
template_name = "account/login.html"
template_name_ajax = "account/ajax/login.html"
form_class = LoginUsernameForm
form_kwargs = {}
redirect_field_name = "next"
def get(self, *args, **kwargs):
if self.request.user.is_authenticated():
# Check for password expiration, redirect if needed.
if check_password_expired(self.request.user):
return redirect("account_password")
return redirect(self.get_success_url())
return super(LoginView, self).get(*args, **kwargs)
def get_template_names(self):
if self.request.is_ajax():
return [self.template_name_ajax]
else:
return [self.template_name]
def get_context_data(self, **kwargs):
ctx = super(LoginView, self).get_context_data(**kwargs)
redirect_field_name = self.get_redirect_field_name()
ctx.update({
"redirect_field_name": redirect_field_name,
"redirect_field_value": self.request.POST.get(redirect_field_name, self.request.GET.get(redirect_field_name, "")),
})
return ctx
def get_form_kwargs(self):
kwargs = super(LoginView, self).get_form_kwargs()
kwargs.update(self.form_kwargs)
return kwargs
def form_invalid(self, form):
signals.user_login_attempt.send(
sender=LoginView,
username=get_form_data(form, form.identifier_field),
result=form.is_valid()
)
return super(LoginView, self).form_invalid(form)
def form_valid(self, form):
self.login_user(form)
self.after_login(form)
return redirect(self.get_success_url())
def after_login(self, form):
signals.user_logged_in.send(sender=LoginView, user=form.user, form=form)
def get_success_url(self, fallback_url=None, **kwargs):
if fallback_url is None:
fallback_url = settings.ACCOUNT_LOGIN_REDIRECT_URL
kwargs.setdefault("redirect_field_name", self.get_redirect_field_name())
return default_redirect(self.request, fallback_url, **kwargs)
def get_redirect_field_name(self):
return self.redirect_field_name
def login_user(self, form):
auth.login(self.request, form.user)
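        # a session expiry of 0 means the session ends when the browser closes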
expiry = settings.ACCOUNT_REMEMBER_ME_EXPIRY if form.cleaned_data.get("remember") else 0
self.request.session.set_expiry(expiry)
class LogoutView(TemplateResponseMixin, View):
template_name = "account/logout.html"
redirect_field_name = "next"
def get(self, *args, **kwargs):
if not self.request.user.is_authenticated():
return redirect(self.get_redirect_url())
ctx = self.get_context_data()
return self.render_to_response(ctx)
def post(self, *args, **kwargs):
if self.request.user.is_authenticated():
auth.logout(self.request)
return redirect(self.get_redirect_url())
def get_context_data(self, **kwargs):
ctx = kwargs
redirect_field_name = self.get_redirect_field_name()
ctx.update({
"redirect_field_name": redirect_field_name,
"redirect_field_value": self.request.POST.get(redirect_field_name, self.request.GET.get(redirect_field_name, "")),
})
return ctx
def get_redirect_field_name(self):
return self.redirect_field_name
def get_redirect_url(self, fallback_url=None, **kwargs):
if fallback_url is None:
fallback_url = settings.ACCOUNT_LOGOUT_REDIRECT_URL
kwargs.setdefault("redirect_field_name", self.get_redirect_field_name())
return default_redirect(self.request, fallback_url, **kwargs)
class ConfirmEmailView(TemplateResponseMixin, View):
http_method_names = ["get", "post"]
messages = {
"email_confirmed": {
"level": messages.SUCCESS,
"text": _("You have confirmed {email}.")
}
}
def get_template_names(self):
return {
"GET": ["account/email_confirm.html"],
"POST": ["account/email_confirmed.html"],
}[self.request.method]
def get(self, *args, **kwargs):
self.object = self.get_object()
ctx = self.get_context_data()
return self.render_to_response(ctx)
def post(self, *args, **kwargs):
self.object = confirmation = self.get_object()
confirmation.confirm()
self.after_confirmation(confirmation)
redirect_url = self.get_redirect_url()
if not redirect_url:
ctx = self.get_context_data()
return self.render_to_response(ctx)
if self.messages.get("email_confirmed"):
messages.add_message(
self.request,
self.messages["email_confirmed"]["level"],
self.messages["email_confirmed"]["text"].format(**{
"email": confirmation.email_address.email
})
)
return redirect(redirect_url)
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
try:
return queryset.get(key=self.kwargs["key"].lower())
except EmailConfirmation.DoesNotExist:
raise Http404()
def get_queryset(self):
qs = EmailConfirmation.objects.all()
qs = qs.select_related("email_address__user")
return qs
def get_context_data(self, **kwargs):
ctx = kwargs
ctx["confirmation"] = self.object
return ctx
def get_redirect_url(self):
if self.request.user.is_authenticated():
if not settings.ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
return settings.ACCOUNT_LOGIN_REDIRECT_URL
return settings.ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
else:
return settings.ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
def after_confirmation(self, confirmation):
user = confirmation.email_address.user
user.is_active = True
user.save()
class ChangePasswordView(PasswordMixin, FormView):
template_name = "account/password_change.html"
form_class = ChangePasswordForm
redirect_field_name = "next"
messages = {
"password_changed": {
"level": messages.SUCCESS,
"text": _("Password successfully changed.")
}
}
form_password_field = "password_new"
fallback_url_setting = "ACCOUNT_PASSWORD_CHANGE_REDIRECT_URL"
def get(self, *args, **kwargs):
if not self.request.user.is_authenticated():
return redirect("account_password_reset")
return super(ChangePasswordView, self).get(*args, **kwargs)
def post(self, *args, **kwargs):
if not self.request.user.is_authenticated():
return HttpResponseForbidden()
return super(ChangePasswordView, self).post(*args, **kwargs)
def form_valid(self, form):
self.change_password(form)
self.create_password_history(form)
self.after_change_password()
return redirect(self.get_success_url())
def get_user(self):
return self.request.user
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = {"user": self.request.user, "initial": self.get_initial()}
if self.request.method in ["POST", "PUT"]:
kwargs.update({
"data": self.request.POST,
"files": self.request.FILES,
})
return kwargs
def change_password(self, form):
user = super(ChangePasswordView, self).change_password(form)
# required on Django >= 1.7 to keep the user authenticated
if hasattr(auth, "update_session_auth_hash"):
auth.update_session_auth_hash(self.request, user)
class PasswordResetView(FormView):
template_name = "account/password_reset.html"
template_name_sent = "account/password_reset_sent.html"
form_class = PasswordResetForm
token_generator = default_token_generator
def get_context_data(self, **kwargs):
context = super(PasswordResetView, self).get_context_data(**kwargs)
if self.request.method == "POST" and "resend" in self.request.POST:
context["resend"] = True
return context
def form_valid(self, form):
self.send_email(form.cleaned_data["email"])
response_kwargs = {
"request": self.request,
"template": self.template_name_sent,
"context": self.get_context_data(form=form)
}
return self.response_class(**response_kwargs)
def send_email(self, email):
User = get_user_model()
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
current_site = get_current_site(self.request)
email_qs = EmailAddress.objects.filter(email__iexact=email)
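        # one email address may belong to several users via EmailAddress, so a
        # reset link is sent for every matching account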
for user in User.objects.filter(pk__in=email_qs.values("user")):
uid = int_to_base36(user.id)
token = self.make_token(user)
password_reset_url = "{0}://{1}{2}".format(
protocol,
current_site.domain,
reverse("account_password_reset_token", kwargs=dict(uidb36=uid, token=token))
)
ctx = {
"user": user,
"current_site": current_site,
"password_reset_url": password_reset_url,
}
hookset.send_password_reset_email([user.email], ctx)
def make_token(self, user):
return self.token_generator.make_token(user)
class PasswordResetTokenView(PasswordMixin, FormView):
template_name = "account/password_reset_token.html"
template_name_fail = "account/password_reset_token_fail.html"
form_class = PasswordResetTokenForm
token_generator = default_token_generator
form_password_field = "password"
fallback_url_setting = "ACCOUNT_PASSWORD_RESET_REDIRECT_URL"
def get(self, request, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
ctx = self.get_context_data(form=form)
if not self.check_token(self.get_user(), self.kwargs["token"]):
return self.token_fail()
return self.render_to_response(ctx)
def get_context_data(self, **kwargs):
ctx = super(PasswordResetTokenView, self).get_context_data(**kwargs)
ctx.update({
"uidb36": self.kwargs["uidb36"],
"token": self.kwargs["token"],
})
return ctx
def form_valid(self, form):
self.change_password(form)
self.create_password_history(form)
self.after_change_password()
return redirect(self.get_success_url())
def get_user(self):
try:
uid_int = base36_to_int(self.kwargs["uidb36"])
except ValueError:
raise Http404()
return get_object_or_404(get_user_model(), id=uid_int)
def check_token(self, user, token):
return self.token_generator.check_token(user, token)
def token_fail(self):
response_kwargs = {
"request": self.request,
"template": self.template_name_fail,
"context": self.get_context_data()
}
return self.response_class(**response_kwargs)
class SettingsView(LoginRequiredMixin, FormView):
template_name = "account/settings.html"
form_class = SettingsForm
redirect_field_name = "next"
messages = {
"settings_updated": {
"level": messages.SUCCESS,
"text": _("Account settings updated.")
},
}
def get_form_class(self):
# @@@ django: this is a workaround to not having a dedicated method
# to initialize self with a request in a known good state (of course
# this only works with a FormView)
self.primary_email_address = EmailAddress.objects.get_primary(self.request.user)
return super(SettingsView, self).get_form_class()
def get_initial(self):
initial = super(SettingsView, self).get_initial()
if self.primary_email_address:
initial["email"] = self.primary_email_address.email
initial["timezone"] = self.request.user.account.timezone
initial["language"] = self.request.user.account.language
return initial
def form_valid(self, form):
self.update_settings(form)
if self.messages.get("settings_updated"):
messages.add_message(
self.request,
self.messages["settings_updated"]["level"],
self.messages["settings_updated"]["text"]
)
return redirect(self.get_success_url())
def update_settings(self, form):
self.update_email(form)
self.update_account(form)
def update_email(self, form, confirm=None):
user = self.request.user
if confirm is None:
confirm = settings.ACCOUNT_EMAIL_CONFIRMATION_EMAIL
# @@@ handle multiple emails per user
email = form.cleaned_data["email"].strip()
if not self.primary_email_address:
user.email = email
EmailAddress.objects.add_email(self.request.user, email, primary=True, confirm=confirm)
user.save()
else:
if email != self.primary_email_address.email:
self.primary_email_address.change(email, confirm=confirm)
def get_context_data(self, **kwargs):
ctx = super(SettingsView, self).get_context_data(**kwargs)
redirect_field_name = self.get_redirect_field_name()
ctx.update({
"redirect_field_name": redirect_field_name,
"redirect_field_value": self.request.POST.get(redirect_field_name, self.request.GET.get(redirect_field_name, "")),
})
return ctx
def update_account(self, form):
fields = {}
if "timezone" in form.cleaned_data:
fields["timezone"] = form.cleaned_data["timezone"]
if "language" in form.cleaned_data:
fields["language"] = form.cleaned_data["language"]
if fields:
account = self.request.user.account
for k, v in fields.items():
setattr(account, k, v)
account.save()
def get_redirect_field_name(self):
return self.redirect_field_name
def get_success_url(self, fallback_url=None, **kwargs):
if fallback_url is None:
fallback_url = settings.ACCOUNT_SETTINGS_REDIRECT_URL
kwargs.setdefault("redirect_field_name", self.get_redirect_field_name())
return default_redirect(self.request, fallback_url, **kwargs)
class DeleteView(LogoutView):
template_name = "account/delete.html"
messages = {
"account_deleted": {
"level": messages.WARNING,
"text": _("Your account is now inactive and your data will be expunged in the next {expunge_hours} hours.")
},
}
def post(self, *args, **kwargs):
AccountDeletion.mark(self.request.user)
auth.logout(self.request)
messages.add_message(
self.request,
self.messages["account_deleted"]["level"],
self.messages["account_deleted"]["text"].format(**{
"expunge_hours": settings.ACCOUNT_DELETION_EXPUNGE_HOURS,
})
)
return redirect(self.get_redirect_url())
def get_context_data(self, **kwargs):
ctx = super(DeleteView, self).get_context_data(**kwargs)
ctx.update(kwargs)
ctx["ACCOUNT_DELETION_EXPUNGE_HOURS"] = settings.ACCOUNT_DELETION_EXPUNGE_HOURS
return ctx
|
|
from collections import namedtuple,OrderedDict
import re
import sys
from inspect import isgeneratorfunction,getargspec
import csv
from io import StringIO
from datetime import datetime
def print_error(err_string):
"""Function to write to stderr"""
sys.stderr.write("ERROR[UDF]: " + str(err_string) + "\n")
# PostgreSQL COPY TO text Format parser
# See: http://www.postgresql.org/docs/9.1/static/sql-copy.html#AEN64302
escapeCodeToSpecial = {
'\\': '\\',
'b': '\b',
'f': '\f',
'r': '\r',
't': '\t',
'n': '\n',
'v': '\v',
}
specialToEscapeCode = {v: k for k, v in escapeCodeToSpecial.items()}
def decode_pg_text_escapes(m):
    c = m.group(1)
    if c in escapeCodeToSpecial:
        return escapeCodeToSpecial[c]
    elif c.startswith("x"):
        return chr(int(c[1:], base=16))
    elif c.startswith("0"):
        return chr(int(c, base=8))
    else:
        return c
def unescape_postgres_text_format(s):
    # unescape PostgreSQL text format; the multi-character escapes must come
    # before the catch-all "." so that \xNN and \0NN are consumed whole
    return re.sub(r"\\(x[0-9A-Fa-f]{1,2}|0[0-7]{1,2}|.)", decode_pg_text_escapes, s)
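# For example, unescape_postgres_text_format(r"a\tb") returns "a" and "b"
# separated by a real tab, and unescape_postgres_text_format(r"\x41") == "A"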
BOOL_PARSER = {
't' : True,
'f' : False,
'NULL' : None,
'\\N' : None
}
def timestamp(timestamp_str):
"""Given a timestamp string, return a timestamp string in ISO 8601 format to emulate
Postgres 9.5's to_json timestamp formatting.
This supports the `timestamp without time zone` PostgreSQL type.
Time zone offsets are not supported. http://bugs.python.org/issue6641
Examples:
>>> timestamp('2016-06-17 20:10:38')
'2016-06-17T20:10:38'
>>> timestamp('2016-06-17 20:10:37.9293')
'2016-06-17T20:10:37.929300'
"""
    try:
        parsed = datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        try:
            parsed = datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            return timestamp_str
    return parsed.isoformat()
TYPE_PARSERS = {
'text' : lambda x : str(x),
'int' : lambda x : int(x.strip()),
'float' : lambda x : float(x.strip()),
'boolean' : lambda x : BOOL_PARSER[x.lower().strip()],
'timestamp': timestamp,
}
# how to normalize type names
CANONICAL_TYPE_BY_NAME = {
'integer' : 'int',
'bigint' : 'int',
'double' : 'float',
'double precision' : 'float',
'numeric' : 'float',
'unknown' : 'text',
}
CANONICAL_TYPE_BY_REGEX = {
re.compile(r'timestamp(\(\d\))? without time zone'): 'timestamp',
}
def normalize_type_name(ty):
ty = ty.lower()
if ty.endswith('[]'):
return normalize_type_name(ty[:-2]) + '[]'
if ty in CANONICAL_TYPE_BY_NAME:
return CANONICAL_TYPE_BY_NAME[ty]
else:
for patt,ty_canonical in CANONICAL_TYPE_BY_REGEX.items():
if patt.match(ty):
return ty_canonical
return ty
def check_supported_type(nm, ty, array_nesting=0):
if ty.endswith('[]'):
if array_nesting == 0:
check_supported_type(nm, ty[:-2], array_nesting=array_nesting+1)
else: # XXX the parser cannot parse nested arrays correctly
raise TypeError("column '%s' is of unsupported nested array type: %s" % (nm, ty + '[]'))
elif not ty in TYPE_PARSERS:
raise TypeError("column '%s' is of unsupported type: %s" % (nm, ty))
return nm, ty
def parse_pgtsv_element(s, t, array_nesting_depth=0):
"""
    Parse an element in psql-compatible tsv format, i.e. {...}-style arrays,
    based on the provided type and the type-parser dictionary
"""
if s is None:
return s
if array_nesting_depth == 0:
if s == '\\N':
# NULLs outside arrays are represented as \N
# unless specified otherwise in the SQL statement (COPY ... NULL ...)
return None
elif not t.endswith('[]'):
# Need to handle PG TSV escape sequences for primitive types here,
# escapes for array elements are handled during array parsing
s = unescape_postgres_text_format(s)
if t.endswith('[]'): # Handle lists recursively
if s[0] == '{' and s[-1] == '}':
s_orig = s
s = s[1:-1] # to strip curly braces
def unescapeTSVBackslashes(matches):
c = matches.group(1)
return escapeCodeToSpecial[c] if c in escapeCodeToSpecial else c
s = re.sub(r'\\(.)', unescapeTSVBackslashes, s)
s = re.sub(r'\\(.)', lambda m : '""' if m.group(1) == '"' else m.group(1), s) # XXX quotes and backslashes in arrays are escaped another time
values = []
v = None
is_quoted = False
def null_or_itself(v): return None if not is_quoted and v == 'NULL' else v
while len(s) > 0:
if s[0] == ',': # found the end of a value
values.append(null_or_itself(v))
v = None
is_quoted = False
s = s[1:]
elif s[0] == '"': # found a quote
# e.g.: 1,this"is an error",2,3
if v is None: # this is a new value
v = ""
else: # this an escaped quote, append to the current value
v += '"'
# find the other end of the quote and consume
m = re.match(r'^"([^"]*)"', s)
if m:
v += m.group(1)
is_quoted = True # TODO error if quoting mixed
s = s[len(m.group(0)):]
else:
raise Exception("Unterminated quote in '%s'" % s_orig)
else:
m = re.match(r'^([^,]*)', s)
if m: # find the next comma to consume up to it
v = m.group(1)
else: # or consume the rest of the string as the value
v = s
s = s[len(v):]
values.append(null_or_itself(v))
split = values
else:
raise Exception("Surrounding curly braces ({...}) expected for array type %(type)s but found: '%(value)s'" % dict(
type=t,
value=s,
))
return [parse_pgtsv_element(ss, t[:-2], array_nesting_depth=array_nesting_depth+1) for ss in split]
else: # parse correct value using the parser corresponding to the type
try:
parser = TYPE_PARSERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
return parser(s)
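# A minimal sketch (not used by the module) of array parsing: an unquoted NULL
# inside an array becomes None, while a quoted "NULL" stays a string.
def _demo_parse_pgtsv_array():
    assert parse_pgtsv_element('{1,2,NULL}', 'int[]') == [1, 2, None]
    assert parse_pgtsv_element('{"NULL"}', 'text[]') == ['NULL']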
class Row:
def __str__(self):
return '<Row(' + ', '.join("%s=%s" % x for x in self.__dict__.items()) + ')>'
def __repr__(self):
return str(self)
def _asdict(self):
return self.__dict__
class PGTSVParser:
"""
Initialized with a list of duples (field_name, field_type)
Is a factory for simple Row class
Parsed from Postgres-style TSV input lines
"""
def __init__(self, fields):
self.fields = [check_supported_type(nm,normalize_type_name(ty)) for nm,ty in fields]
def parse_line(self, line):
row = Row()
attribs = line.rstrip().split('\t')
if len(attribs) != len(self.fields):
raise ValueError("Expected %(num_rows_declared)d attributes, but found %(num_rows_found)d in input row:\n%(row)s" % dict(
                num_rows_declared=len(self.fields), num_rows_found=len(attribs), row=line,
))
for i,attrib in enumerate(attribs):
field_name, field_type = self.fields[i]
setattr(row, field_name, parse_pgtsv_element(attrib, field_type))
return row
def parse_stdin(self):
for line in sys.stdin:
yield self.parse_line(line)
TYPE_CHECKERS = {
'text' : lambda x : type(x) == str,
'int' : lambda x : type(x) == int,
'float' : lambda x : type(x) == float,
'boolean' : lambda x : type(x) == bool,
# TODO timestamp
}
def print_pgtsv_element(x, n, t, array_nesting_depth=0):
"""Checks element x against type string t, then prints in PG-TSV format if a match"""
# Handle NULLs first
if x is None:
if array_nesting_depth == 0:
return r'\N'
else:
return 'NULL'
# Handle lists recursively
if '[]' in t:
if not hasattr(x, '__iter__'):
raise ValueError("Mismatch between array type and non-iterable in output row:\n%s" % x)
else:
return '{%s}' % ','.join(print_pgtsv_element(e, n, t[:-2], array_nesting_depth=array_nesting_depth+1) for e in x)
# Else check type & print, hanlding special case of string in array
try:
checker = TYPE_CHECKERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
if not checker(x):
raise Exception("Output column '%(name)s' of type %(declared_type)s has incorrect value of %(value_type)s: '%(value)s'" % dict(
name=n, declared_type=t, value_type=type(x), value=x,
))
if t == 'text':
x = str(x)
def escapeWithTSVBackslashes(x):
return re.sub(r'[\b\f\n\r\t\\]', lambda m : "\\" + specialToEscapeCode[m.group(0)], x)
if array_nesting_depth == 0:
# primitive types just need TSV escaping
return escapeWithTSVBackslashes(x)
else:
if re.search(r'^[a-zA-Z0-9_.\b\x1c\x1d\x1e\x1f\x7f\[\]()]+$', x) \
and x not in ["", "NULL", "null"]:
# we don't need to quote the value in some special cases
return escapeWithTSVBackslashes(x)
else: # otherwise, surround value with quotes
x = re.sub(r'[\\"]', lambda m : '\\' + m.group(0), x) # XXX quotes and backslashes in arrays are escaped another time
return '"%s"' % escapeWithTSVBackslashes(x) # then, the TSV escaping
elif t == 'boolean':
return 't' if x else 'f'
# TODO timestamp
else:
return str(x)
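# Worked example (a sketch) of the escaping rules above, for a text[] column
# hypothetically named 'tags': a comma forces quoting inside an array literal,
# while None prints as NULL at array depth but as \N at the top level:
#   print_pgtsv_element(['a,b', 'c'], 'tags', 'text[]')  ->  '{"a,b",c}'
#   print_pgtsv_element(None, 'tags', 'text[]')          ->  '\\N'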
class PGTSVPrinter:
"""
    Initialized with a list of (field_name, field_type) pairs
Prints out Postgres-format TSV output lines
"""
def __init__(self, fields):
self.fields = fields
def write(self, out):
if len(out) != len(self.fields):
raise ValueError("Expected %(num_rows_declared)d attributes, but found %(num_rows_found)d in output row:\n%(row)s" % dict(
num_rows_declared=len(self.fields), num_rows_found=len(out), row=out,
))
else:
print('\t'.join(print_pgtsv_element(x, n, t) for x,(n,t) in zip(out, self.fields)))
# how to get types specified as default values of a function
def format_from_args_defaults_of(aFunctionOrFormat):
if hasattr(aFunctionOrFormat, '__call__'):
# TODO in Python3, support types in function annotations (PEP 3107: https://www.python.org/dev/peps/pep-3107/)
spec = getargspec(aFunctionOrFormat)
return zip(spec.args, spec.defaults)
else:
return aFunctionOrFormat
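# For example (a sketch): given `def f(doc_id="text", sentence="text"): ...`,
# format_from_args_defaults_of(f) pairs each argument name with its default
# value, yielding [('doc_id', 'text'), ('sentence', 'text')].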
## function decorators to be used directly in UDF implementations
# decorators for input and output formats
def format_decorator(attrName):
def decorator(*name_type_pairs, **name_type_dict):
"""
When a function is decorated with this (e.g., @returns(...) or @over(...)
preceding the def line), the pairs of column name and type given as
arguments are kept as the function's attribute to supply other decorators,
such as @tsv_extractor, with information for deciding how to parse the
input lines or format the output lines.
"""
# check single argument case with a function or dict
if len(name_type_pairs) == 1:
if hasattr(name_type_pairs[0], '__call__'):
name_type_pairs = format_from_args_defaults_of(name_type_pairs[0])
elif type(name_type_pairs[0]) in [dict, OrderedDict]:
name_type_pairs = name_type_pairs[0]
            # XXX @over(collections.OrderedDict(foo="type", bar="type", ...)) doesn't work
# as Python forgets the order when calling with keyword argument binding.
# merge dictionaries
name_type_pairs = list(name_type_pairs) + list(name_type_dict.items())
def decorate(f):
setattr(f, attrName, name_type_pairs)
return f
return decorate
return decorator
over = format_decorator("input_format")
returns = format_decorator("output_format")
# decorators that initiate the main extractor loop
def tsv_extractor(generator):
"""
When a generator function is decorated with this (i.e., @tsv_extractor
preceding the def line), standard input is parsed as Postgres-style TSV
    (PGTSV) input rows, the function is applied to generate output rows, and
    each row the generator yields is checked against the declared output
    format before being printed back as a PGTSV row.
"""
# Expects the input and output formats to have been decorated with @over and @returns
try:
# @over has precedence over default values of function arguments
input_format = generator.input_format
except AttributeError:
input_format = format_from_args_defaults_of(generator)
try:
output_format = generator.output_format
# also support function argument defaults for output_format for symmetry
output_format = format_from_args_defaults_of(output_format)
except AttributeError:
raise ValueError("The function must be decorated with @returns")
# TODO or maybe just skip type checking if @returns isn't present?
# Check generator function
if not isgeneratorfunction(generator):
raise ValueError("The function must be a *generator*, i.e., use yield not return")
# Create the input parser
parser = PGTSVParser(input_format)
# Create the output parser
printer = PGTSVPrinter(output_format)
for row in parser.parse_stdin():
for out_row in generator(**row._asdict()):
printer.write(out_row)
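if __name__ == "__main__":
    # End-to-end usage sketch with hypothetical column names and logic: run
    # this module as a filter with PGTSV rows piped in on stdin. Note that
    # applying @tsv_extractor starts the read/parse/print loop immediately,
    # so the decorated definition must come last.
    @tsv_extractor
    @returns(lambda doc_id="text", word="text": [])
    def extract(doc_id="text", sentence="text"):
        # emit one output row per whitespace-separated token of `sentence`
        for w in sentence.split():
            yield [doc_id, w]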
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import shutil
import sys
import tempfile
from collections import defaultdict
from pex.interpreter import PythonInterpreter
from pex.pex_builder import PEXBuilder
from pex.platforms import Platform
from twitter.common.collections import OrderedSet
from pants.backend.codegen.targets.python_antlr_library import PythonAntlrLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.prep_command import PrepCommand
from pants.backend.python.antlr_builder import PythonAntlrBuilder
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.resolver import resolve_multi
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.thrift_builder import PythonThriftBuilder
from pants.base.build_environment import get_buildroot
from pants.base.build_invalidator import BuildInvalidator, CacheKeyGenerator
from pants.util.dirutil import safe_mkdir, safe_rmtree
logger = logging.getLogger(__name__)
class PythonChroot(object):
_VALID_DEPENDENCIES = {
PrepCommand: 'prep',
PythonLibrary: 'libraries',
PythonRequirementLibrary: 'reqs',
PythonBinary: 'binaries',
PythonThriftLibrary: 'thrifts',
PythonAntlrLibrary: 'antlrs',
PythonTests: 'tests'
}
MEMOIZED_THRIFTS = {}
class InvalidDependencyException(Exception):
def __init__(self, target):
Exception.__init__(self, "Not a valid Python dependency! Found: {}".format(target))
# TODO: A little extra push and we can get rid of the 'context' argument.
def __init__(self,
context,
python_setup,
python_repos,
targets,
extra_requirements=None,
builder=None,
platforms=None,
interpreter=None):
self.context = context
self._python_setup = python_setup
self._python_repos = python_repos
self._targets = targets
self._extra_requirements = list(extra_requirements) if extra_requirements else []
self._platforms = platforms
self._interpreter = interpreter or PythonInterpreter.get()
self._builder = builder or PEXBuilder(os.path.realpath(tempfile.mkdtemp()),
interpreter=self._interpreter)
# Note: unrelated to the general pants artifact cache.
self._egg_cache_root = os.path.join(
self._python_setup.scratch_dir, 'artifacts', str(self._interpreter.identity))
self._key_generator = CacheKeyGenerator()
self._build_invalidator = BuildInvalidator(self._egg_cache_root)
def delete(self):
"""Deletes this chroot from disk if it has been dumped."""
safe_rmtree(self.path())
def __del__(self):
if os.getenv('PANTS_LEAVE_CHROOT') is None:
self.delete()
else:
self.debug('Left chroot at {}'.format(self.path()))
@property
def builder(self):
return self._builder
def debug(self, msg, indent=0):
if os.getenv('PANTS_VERBOSE') is not None:
print('{}{}'.format(' ' * indent, msg))
def path(self):
return os.path.realpath(self._builder.path())
def _dump_library(self, library):
def copy_to_chroot(base, path, add_function):
src = os.path.join(get_buildroot(), base, path)
add_function(src, path)
self.debug(' Dumping library: {}'.format(library))
for relpath in library.sources_relative_to_source_root():
try:
copy_to_chroot(library.target_base, relpath, self._builder.add_source)
except OSError as e:
logger.error("Failed to copy {path} for library {library}"
.format(path=os.path.join(library.target_base, relpath),
library=library))
raise
for resources_tgt in library.resources:
for resource_file_from_source_root in resources_tgt.sources_relative_to_source_root():
try:
copy_to_chroot(resources_tgt.target_base, resource_file_from_source_root,
self._builder.add_resource)
except OSError as e:
logger.error("Failed to copy {path} for resource {resource}"
.format(path=os.path.join(resources_tgt.target_base,
resource_file_from_source_root),
resource=resources_tgt.address.spec))
raise
def _dump_requirement(self, req):
self.debug(' Dumping requirement: {}'.format(req))
self._builder.add_requirement(req)
def _dump_distribution(self, dist):
self.debug(' Dumping distribution: .../{}'.format(os.path.basename(dist.location)))
self._builder.add_distribution(dist)
def _generate_requirement(self, library, builder_cls):
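    # Builds (at most once per cache key) an sdist for the codegen'd library
    # and publishes it to a local directory that doubles as a find-links
    # "repository", so the returned requirement can be resolved from it.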
library_key = self._key_generator.key_for_target(library)
builder = builder_cls(library, get_buildroot(),
self.context.options, '-' + library_key.hash[:8])
cache_dir = os.path.join(self._egg_cache_root, library_key.id)
if self._build_invalidator.needs_update(library_key):
sdist = builder.build(interpreter=self._interpreter)
safe_mkdir(cache_dir)
shutil.copy(sdist, os.path.join(cache_dir, os.path.basename(sdist)))
self._build_invalidator.update(library_key)
return PythonRequirement(builder.requirement_string(), repository=cache_dir, use_2to3=True)
def _generate_thrift_requirement(self, library):
return self._generate_requirement(library, PythonThriftBuilder)
def _generate_antlr_requirement(self, library):
return self._generate_requirement(library, PythonAntlrBuilder)
def resolve(self, targets):
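    # Walk the transitive closure of `targets`, bucketing each target under
    # its _VALID_DEPENDENCIES key (e.g. 'libraries', 'reqs'); bare
    # Dependencies aggregation targets are traversed but not collected.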
children = defaultdict(OrderedSet)
def add_dep(trg):
for target_type, target_key in self._VALID_DEPENDENCIES.items():
if isinstance(trg, target_type):
children[target_key].add(trg)
return
elif isinstance(trg, Dependencies):
return
raise self.InvalidDependencyException(trg)
for target in targets:
target.walk(add_dep)
return children
def dump(self):
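    # Dump order: raw sources for libraries/binaries first, then requirements
    # (those generated from thrift/antlr codegen plus the declared ones), and
    # finally the resolved third-party distributions.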
self.debug('Building chroot for {}:'.format(self._targets))
targets = self.resolve(self._targets)
for lib in targets['libraries'] | targets['binaries']:
self._dump_library(lib)
generated_reqs = OrderedSet()
if targets['thrifts']:
for thr in set(targets['thrifts']):
if thr not in self.MEMOIZED_THRIFTS:
self.MEMOIZED_THRIFTS[thr] = self._generate_thrift_requirement(thr)
generated_reqs.add(self.MEMOIZED_THRIFTS[thr])
generated_reqs.add(PythonRequirement('thrift', use_2to3=True))
for antlr in targets['antlrs']:
generated_reqs.add(self._generate_antlr_requirement(antlr))
reqs_from_libraries = OrderedSet()
for req_lib in targets['reqs']:
for req in req_lib.payload.requirements:
reqs_from_libraries.add(req)
reqs_to_build = OrderedSet()
find_links = []
for req in reqs_from_libraries | generated_reqs | self._extra_requirements:
if not req.should_build(self._interpreter.python, Platform.current()):
self.debug('Skipping {} based upon version filter'.format(req))
continue
reqs_to_build.add(req)
self._dump_requirement(req.requirement)
if req.repository:
find_links.append(req.repository)
distributions = resolve_multi(
self._python_setup,
self._python_repos,
reqs_to_build,
interpreter=self._interpreter,
platforms=self._platforms,
ttl=self.context.options.for_global_scope().python_chroot_requirements_ttl,
find_links=find_links)
locations = set()
for platform, dist_set in distributions.items():
for dist in dist_set:
if dist.location not in locations:
self._dump_distribution(dist)
locations.add(dist.location)
if len(targets['binaries']) > 1:
print('WARNING: Target has multiple python_binary targets!', file=sys.stderr)
return self._builder
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Type
from unittest.mock import MagicMock, PropertyMock, patch
import pytest
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.mgmt.datafactory.models import FactoryListResponse
from pytest import fixture
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
provide_targeted_factory,
)
from airflow.utils import db
DEFAULT_RESOURCE_GROUP = "defaultResourceGroup"
RESOURCE_GROUP = "testResourceGroup"
DEFAULT_FACTORY = "defaultFactory"
FACTORY = "testFactory"
DEFAULT_CONNECTION_CLIENT_SECRET = "azure_data_factory_test_client_secret"
DEFAULT_CONNECTION_DEFAULT_CREDENTIAL = "azure_data_factory_test_default_credential"
MODEL = object()
NAME = "testName"
ID = "testId"
def setup_module():
connection_client_secret = Connection(
conn_id=DEFAULT_CONNECTION_CLIENT_SECRET,
conn_type="azure_data_factory",
login="clientId",
password="clientSecret",
extra=json.dumps(
{
"extra__azure_data_factory__tenantId": "tenantId",
"extra__azure_data_factory__subscriptionId": "subscriptionId",
"extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
"extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
}
),
)
connection_default_credential = Connection(
conn_id=DEFAULT_CONNECTION_DEFAULT_CREDENTIAL,
conn_type="azure_data_factory",
extra=json.dumps(
{
"extra__azure_data_factory__subscriptionId": "subscriptionId",
"extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
"extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
}
),
)
connection_missing_subscription_id = Connection(
conn_id="azure_data_factory_missing_subscription_id",
conn_type="azure_data_factory",
login="clientId",
password="clientSecret",
extra=json.dumps(
{
"extra__azure_data_factory__tenantId": "tenantId",
"extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
"extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
}
),
)
connection_missing_tenant_id = Connection(
conn_id="azure_data_factory_missing_tenant_id",
conn_type="azure_data_factory",
login="clientId",
password="clientSecret",
extra=json.dumps(
{
"extra__azure_data_factory__subscriptionId": "subscriptionId",
"extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
"extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
}
),
)
db.merge_conn(connection_client_secret)
db.merge_conn(connection_default_credential)
db.merge_conn(connection_missing_subscription_id)
db.merge_conn(connection_missing_tenant_id)
@fixture
def hook():
client = AzureDataFactoryHook(azure_data_factory_conn_id=DEFAULT_CONNECTION_CLIENT_SECRET)
client._conn = MagicMock(
spec=[
"factories",
"linked_services",
"datasets",
"pipelines",
"pipeline_runs",
"triggers",
"trigger_runs",
]
)
return client
def parametrize(explicit_factory, implicit_factory):
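    """Parametrize a test over both targeting styles: the resource group and
    factory passed explicitly by the caller vs. taken implicitly from the
    connection's default extras."""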
def wrapper(func):
return pytest.mark.parametrize(
("user_args", "sdk_args"),
(explicit_factory, implicit_factory),
ids=("explicit factory", "implicit factory"),
)(func)
return wrapper
def test_provide_targeted_factory():
def echo(_, resource_group_name=None, factory_name=None):
return resource_group_name, factory_name
conn = MagicMock()
hook = MagicMock()
hook.get_connection.return_value = conn
conn.extra_dejson = {}
assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, FACTORY) == (RESOURCE_GROUP, FACTORY)
conn.extra_dejson = {
"extra__azure_data_factory__resource_group_name": DEFAULT_RESOURCE_GROUP,
"extra__azure_data_factory__factory_name": DEFAULT_FACTORY,
}
assert provide_targeted_factory(echo)(hook) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, None) == (RESOURCE_GROUP, DEFAULT_FACTORY)
assert provide_targeted_factory(echo)(hook, None, FACTORY) == (DEFAULT_RESOURCE_GROUP, FACTORY)
assert provide_targeted_factory(echo)(hook, None, None) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
with pytest.raises(AirflowException):
conn.extra_dejson = {}
provide_targeted_factory(echo)(hook)
@pytest.mark.parametrize(
("connection_id", "credential_type"),
[
(DEFAULT_CONNECTION_CLIENT_SECRET, ClientSecretCredential),
(DEFAULT_CONNECTION_DEFAULT_CREDENTIAL, DefaultAzureCredential),
],
)
def test_get_connection_by_credential_client_secret(connection_id: str, credential_type: Type):
hook = AzureDataFactoryHook(connection_id)
with patch.object(hook, "_create_client") as mock_create_client:
mock_create_client.return_value = MagicMock()
connection = hook.get_conn()
assert connection is not None
mock_create_client.assert_called_once()
assert isinstance(mock_create_client.call_args[0][0], credential_type)
assert mock_create_client.call_args[0][1] == "subscriptionId"
@parametrize(
explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_get_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_factory(*user_args)
hook._conn.factories.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_create_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_factory(*user_args)
hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_factory_exists") as mock_factory_exists:
mock_factory_exists.return_value = True
hook.update_factory(*user_args)
hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_factory_exists") as mock_factory_exists:
mock_factory_exists.return_value = False
with pytest.raises(AirflowException, match=r"Factory .+ does not exist"):
hook.update_factory(*user_args)
@parametrize(
explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_delete_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_factory(*user_args)
hook._conn.factories.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_linked_service(*user_args)
hook._conn.linked_services.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_linked_service(*user_args)
hook._conn.linked_services.create_or_update(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_linked_service_exists") as mock_linked_service_exists:
mock_linked_service_exists.return_value = True
hook.update_linked_service(*user_args)
hook._conn.linked_services.create_or_update(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_linked_service_exists") as mock_linked_service_exists:
mock_linked_service_exists.return_value = False
with pytest.raises(AirflowException, match=r"Linked service .+ does not exist"):
hook.update_linked_service(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_linked_service(*user_args)
hook._conn.linked_services.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_dataset(*user_args)
hook._conn.datasets.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_dataset(*user_args)
hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_dataset_exists") as mock_dataset_exists:
mock_dataset_exists.return_value = True
hook.update_dataset(*user_args)
hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_dataset_exists") as mock_dataset_exists:
mock_dataset_exists.return_value = False
with pytest.raises(AirflowException, match=r"Dataset .+ does not exist"):
hook.update_dataset(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_dataset(*user_args)
hook._conn.datasets.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_pipeline(*user_args)
hook._conn.pipelines.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_pipeline(*user_args)
hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_pipeline_exists") as mock_pipeline_exists:
mock_pipeline_exists.return_value = True
hook.update_pipeline(*user_args)
hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_pipeline_exists") as mock_pipeline_exists:
mock_pipeline_exists.return_value = False
with pytest.raises(AirflowException, match=r"Pipeline .+ does not exist"):
hook.update_pipeline(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_pipeline(*user_args)
hook._conn.pipelines.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_run_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.run_pipeline(*user_args)
hook._conn.pipelines.create_run.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_get_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_pipeline_run(*user_args)
hook._conn.pipeline_runs.get.assert_called_with(*sdk_args)
_wait_for_pipeline_run_status_test_args = [
(AzureDataFactoryPipelineRunStatus.SUCCEEDED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, True),
(AzureDataFactoryPipelineRunStatus.FAILED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, False),
(AzureDataFactoryPipelineRunStatus.CANCELLED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, False),
(AzureDataFactoryPipelineRunStatus.IN_PROGRESS, AzureDataFactoryPipelineRunStatus.SUCCEEDED, "timeout"),
(AzureDataFactoryPipelineRunStatus.QUEUED, AzureDataFactoryPipelineRunStatus.SUCCEEDED, "timeout"),
(AzureDataFactoryPipelineRunStatus.CANCELING, AzureDataFactoryPipelineRunStatus.SUCCEEDED, "timeout"),
(AzureDataFactoryPipelineRunStatus.SUCCEEDED, AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES, True),
(AzureDataFactoryPipelineRunStatus.FAILED, AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES, True),
(AzureDataFactoryPipelineRunStatus.CANCELLED, AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES, True),
]
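# "timeout" marks the non-terminal statuses: with the short timeout configured
# in the test below, waiting is expected to raise instead of returning.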
@pytest.mark.parametrize(
argnames=("pipeline_run_status", "expected_status", "expected_output"),
argvalues=_wait_for_pipeline_run_status_test_args,
ids=[
f"run_status_{argval[0]}_expected_{argval[1]}"
if isinstance(argval[1], str)
else f"run_status_{argval[0]}_expected_AnyTerminalStatus"
for argval in _wait_for_pipeline_run_status_test_args
],
)
def test_wait_for_pipeline_run_status(hook, pipeline_run_status, expected_status, expected_output):
config = {"run_id": ID, "timeout": 3, "check_interval": 1, "expected_statuses": expected_status}
with patch.object(AzureDataFactoryHook, "get_pipeline_run") as mock_pipeline_run:
mock_pipeline_run.return_value.status = pipeline_run_status
if expected_output != "timeout":
assert hook.wait_for_pipeline_run_status(**config) == expected_output
else:
with pytest.raises(AzureDataFactoryPipelineRunException):
hook.wait_for_pipeline_run_status(**config)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_cancel_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_pipeline_run(*user_args)
hook._conn.pipeline_runs.cancel.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_trigger(*user_args)
hook._conn.triggers.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_trigger_exists") as mock_trigger_exists:
mock_trigger_exists.return_value = True
hook.update_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
with patch.object(hook, "_trigger_exists") as mock_trigger_exists:
mock_trigger_exists.return_value = False
with pytest.raises(AirflowException, match=r"Trigger .+ does not exist"):
hook.update_trigger(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_trigger(*user_args)
hook._conn.triggers.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_start_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.start_trigger(*user_args)
hook._conn.triggers.begin_start.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_stop_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.stop_trigger(*user_args)
hook._conn.triggers.begin_stop.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_rerun_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.rerun_trigger(*user_args)
hook._conn.trigger_runs.rerun.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_cancel_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_trigger(*user_args)
hook._conn.trigger_runs.cancel.assert_called_with(*sdk_args)
@pytest.mark.parametrize(
argnames="factory_list_result",
argvalues=[iter([FactoryListResponse]), iter([])],
ids=["factory_exists", "factory_does_not_exist"],
)
def test_connection_success(hook, factory_list_result):
hook.get_conn().factories.list.return_value = factory_list_result
status, msg = hook.test_connection()
assert status is True
assert msg == "Successfully connected to Azure Data Factory."
def test_connection_failure(hook):
hook.get_conn().factories.list = PropertyMock(side_effect=Exception("Authentication failed."))
status, msg = hook.test_connection()
assert status is False
assert msg == "Authentication failed."
def test_connection_failure_missing_subscription_id():
hook = AzureDataFactoryHook("azure_data_factory_missing_subscription_id")
status, msg = hook.test_connection()
assert status is False
assert msg == "A Subscription ID is required to connect to Azure Data Factory."
def test_connection_failure_missing_tenant_id():
hook = AzureDataFactoryHook("azure_data_factory_missing_tenant_id")
status, msg = hook.test_connection()
assert status is False
assert msg == "A Tenant ID is required when authenticating with Client ID and Secret."
|
|
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import unittest
import numpy as np
from numpy import nan
import array
import datetime as dt
from ..data_structures import image
from .. import SArray
import os
from ..cython.cy_flexible_type import _translate_through_flexible_type as _flexible_type
from ..cython.cy_flexible_type import _translate_through_flex_list as _tr_flex_list
from ..cython.cy_flexible_type import infer_type_of_list
from ..cython.cy_flexible_type import _get_inferred_column_type, _all_convertable
from ..cython.cy_flexible_type import _check_ft_pyobject_hint_path
from ..cython.cy_flexible_type import pytype_from_type_name
from ..util.timezone import GMT
import datetime
from itertools import product
from copy import copy
import sys
if sys.version_info.major > 2:
long = int
unicode = str
NoneType = type(None)
current_file_dir = os.path.dirname(os.path.realpath(__file__))
def from_lambda(v):
from ..connect import main as glconnect
u = glconnect.get_unity()
return u.eval_lambda(lambda x: x, v)
special_types = set()
IntegerValue = (
[int(0), long(1)]
+ [_dt(0) for _dt in (np.sctypes['int'] + np.sctypes['uint']
+ [np.bool, bool, np.bool_])])
special_types.add(id(IntegerValue))
# 2**63 and -2**63-1 are not representable by a C int64_t, so they are
# treated as floats.
FloatValue = [float(0)] + [_dt(0) for _dt in np.sctypes['float']] + [2**63, -2**63 - 1]
special_types.add(id(FloatValue))
StringValue = ([str('bork'), unicode('bork'), b'bork', b'']
+ [_dt('bork') for _dt in
[np.unicode, np.unicode_, str, unicode, np.str,
np.str_, np.string_]]
+ [str(''), unicode('')]
+ [_dt('') for _dt in
[np.unicode, np.unicode_, str, unicode, np.str,
np.str_, np.string_]])
special_types.add(id(StringValue))
DictValue = [{'a' : 12}, dict()]
special_types.add(id(DictValue))
DatetimeValue = [datetime.date(2000, 6, 12),
datetime.date(1100, 1, 1),
datetime.datetime(2000, 6, 12)]
special_types.add(id(DatetimeValue))
AnyValue = IntegerValue + FloatValue + StringValue + DatetimeValue + DictValue
special_types.add(id(AnyValue))
# All the different types of float sequences we support
FloatSequence = (
[[0.5, 1.5, 2.5], (0.5, 1.5, 2.5),
{0.5, 1.5, 2.5}, frozenset([0.5, 1.5, 2.5])]
+ [array.array(c, [0.5, 1.5, 2.5]) for c in 'fd']
+ [np.array([0.5, 1.5, 2.5], dtype= _dt) for _dt in np.sctypes['float']])
special_types.add(id(FloatSequence))
# All the different types of float sequences we support, with a NaN
FloatSequenceWithNAN = (
[[0.5, 1.5, 2.5, nan], (0.5, 1.5, 2.5, nan),
{0.5, 1.5, 2.5, nan}, frozenset([0.5, 1.5, 2.5, nan])]
+ [array.array(c, [0.5, 1.5, 2.5, nan]) for c in 'fd']
+ [np.array([0.5, 1.5, 2.5, nan], dtype= _dt) for _dt in np.sctypes['float']])
special_types.add(id(FloatSequenceWithNAN))
# All the different types of float sequences we support, with a None
FloatSequenceWithNone = (
[[0.5, 1.5, 2.5, None], (0.5, 1.5, 2.5, None)])
special_types.add(id(FloatSequenceWithNone))
# All the different integer sequences we support
IntegerSequence = (
[[int(i) for i in range(3)]
, [long(i) for i in range(3)]
, tuple(range(3))
, tuple(long(i) for i in range(3))
, set(range(3))
, frozenset(range(3))
]
+ [array.array(c, range(3)) for c in 'bBhHiIlL']
+ [np.array(range(3), dtype = _dt) for _dt in np.sctypes['int']]
+ [np.array(range(3), dtype = _dt) for _dt in np.sctypes['uint']])
special_types.add(id(IntegerSequence))
# All the different integer sequences we support, with a NaN
IntegerSequenceWithNAN = (
[[int(i) for i in range(3)] + [nan]
, [long(i) for i in range(3)] + [nan]
, tuple(range(3)) + (nan,)
, tuple(long(i) for i in range(3)) + (nan,)
, set([long(i) for i in range(3)] + [nan])
, frozenset([long(i) for i in range(3)] + [nan])])
special_types.add(id(IntegerSequenceWithNAN))
# All the different integer sequences we support, with a None
IntegerSequenceWithNone = (
[[int(i) for i in range(3)] + [None]
, [long(i) for i in range(3)] + [None]
, tuple(range(3)) + (None,)
, tuple(long(i) for i in range(3)) + (None,)
, set([long(i) for i in range(3)] + [None])
, frozenset([long(i) for i in range(3)] + [None])])
special_types.add(id(IntegerSequenceWithNone))
# Empty but typed float arrays
EmptyFloatArray = (
[array.array(c, []) for c in 'fd']
+ [np.array([], dtype= _dt) for _dt in np.sctypes['float']])
special_types.add(id(EmptyFloatArray))
# Empty but typed integer arrays
type_codes = 'bBhHiIlL'
if sys.version_info.major == 2:
type_codes += 'c'
EmptyIntegerArray = (
[array.array(c, []) for c in type_codes]
+ [np.array([], dtype= _dt) for _dt in np.sctypes['int']]
+ [np.array([], dtype= _dt) for _dt in np.sctypes['uint']])
special_types.add(id(EmptyIntegerArray))
# All empty arrays
EmptyArray = EmptyIntegerArray + EmptyFloatArray
special_types.add(id(EmptyArray))
EmptySequence = [[], tuple(), set()]
special_types.add(id(EmptySequence))
# Boolean Sequences
BooleanSequence = (
[ list( (i%2 == 0) for i in range(3))
, tuple( (i%2 == 0) for i in range(3))
, set([True]), set([False]), set([True, False])]
+ [np.array([i%2==0 for i in range(3)], dtype= _dt)
for _dt in [np.bool, np.bool_, bool]])
special_types.add(id(BooleanSequence))
# String sequences
StringSequence = (
[ list( str(i) for i in range(3))
, tuple( str(i) for i in range(3))
, set( str(i) for i in range(3))
, frozenset( str(i) for i in range(3))]
+ [np.array([_dt('a'), _dt('b')], dtype = _dt)
for _dt in [np.unicode, np.unicode_, str, unicode, np.str, np.str_, np.string_]]
+ [np.array([_dt('a'), _dt('b')], dtype = object)
for _dt in [np.unicode, np.unicode_, str, unicode, np.str, np.str_, np.string_]])
special_types.add(id(StringSequence))
AnySequence = (EmptySequence + BooleanSequence + StringSequence
+ IntegerSequence + IntegerSequenceWithNone + IntegerSequenceWithNAN
+ FloatSequence + FloatSequenceWithNone + FloatSequenceWithNAN
+ EmptyArray)
special_types.add(id(AnySequence))
def verify_inference(values, expected_type):
# Go through and build a list of all the possible value enumerations that need to be tested.
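    # Each entry that is one of the special value pools above (e.g.
    # IntegerValue) stands for "any element of that pool": build_lookups
    # records an index range per pool so the cartesian product below can
    # substitute every concrete combination.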
def build_lookups(values, L):
for v in values:
if id(v) in special_types:
L.append(range(len(v)))
elif type(v) is list:
build_lookups(v, L)
indices = []
build_lookups(values, indices)
def get_value(values, idx_set):
ret = copy(values)
for i, v in enumerate(values):
if id(v) in special_types:
ret[i] = v[idx_set[-1]]
del idx_set[-1]
elif type(v) is list:
ret[i] = get_value(v, idx_set)
return ret
for idx_set in product(*reversed(indices)):
_v_list = get_value(values, list(idx_set))
for add_none in [True, False]:
v_list = _v_list + [None] if add_none else _v_list
inferred_type, result = _get_inferred_column_type(v_list)
if inferred_type != expected_type:
assert False, ("Expected type %s, got type %s; input value = %s."
% (str(expected_type), str(inferred_type), str(v_list)))
if inferred_type != NoneType:
reconverted_result = _tr_flex_list(result, inferred_type)
assert str(result) == str(reconverted_result), \
(("Values in type translated inconsistently: "
"\nInput value = %s"
"\nOutput value = %s"
"\nReconverted = %s")
% (str(v_list), str(result), reconverted_result))
class FlexibleTypeInference(unittest.TestCase):
def test_int_float(self):
verify_inference([IntegerValue], int)
verify_inference([IntegerValue, IntegerValue], int)
verify_inference([IntegerValue, FloatValue], float)
verify_inference([IntegerValue, nan], float)
verify_inference([], float)
verify_inference([None], float)
verify_inference([IntegerValue, nan], float)
verify_inference([IntegerValue, None, nan], float)
verify_inference([IntegerValue, None, FloatValue], float)
verify_inference([IntegerValue, None, FloatValue, nan], float)
def test_string(self):
verify_inference([StringValue], str)
verify_inference([StringValue, StringValue], str)
verify_inference([StringValue, IntegerValue], NoneType)
verify_inference([StringValue, FloatValue], NoneType)
def test_dict(self):
verify_inference([DictValue], dict)
verify_inference([DictValue, DictValue], dict)
def test_mixed_types(self):
verify_inference([AnySequence, AnyValue], NoneType)
verify_inference([AnySequence, AnyValue, AnySequence], NoneType)
verify_inference([AnySequence, AnyValue, AnyValue], NoneType)
verify_inference([DatetimeValue, StringValue], NoneType)
verify_inference([DatetimeValue, IntegerValue], NoneType)
verify_inference([DatetimeValue, FloatValue], NoneType)
def test_array_list(self):
tests = [
# Individual types
([EmptySequence], list),
([IntegerSequence], array.array),
([IntegerSequenceWithNone], list),
([IntegerSequenceWithNAN], array.array),
([FloatSequence], array.array),
([FloatSequenceWithNAN], array.array),
([FloatSequenceWithNone], list),
([EmptyIntegerArray], array.array),
([EmptyFloatArray], array.array),
([BooleanSequence], array.array),
([StringSequence], list),
# Multiple types
            ([IntegerSequence, FloatSequence], array.array),
            # Multiple types mixed with empty sequences
            ([EmptySequence, EmptyFloatArray], array.array),
            ([EmptySequence, EmptyIntegerArray], array.array),
            ([EmptySequence, IntegerSequence], array.array),
            ([EmptySequence, FloatSequence], array.array),
# Arrays and lists
([StringSequence, EmptyFloatArray], list),
([StringSequence, EmptyIntegerArray], list),
([StringSequence, IntegerSequence], list),
([StringSequence, FloatSequence], list)]
# Add in additional rules for testing
for tv, res in copy(tests):
tests.append( (tv + [EmptySequence], res) )
for tv, res in copy(tests):
tests.append( (tv + [[None]], list) )
for tv, res in copy(tests):
tests.append( (tv + [StringSequence], list) )
# Run the tests
for tv, res in tests:
verify_inference(tv, res)
class FlexibleTypeTest(unittest.TestCase):
    # On lambda return, if the return value is a non-empty list of
    # all numerical values, we try hard to use array.array
def numeric_list_to_array(self, v):
if (type(v) is list) and (len(v) > 0) and all((type(x) is int) or (type(x) is float) for x in v):
return array.array('d', v)
elif (type(v) is list):
return [self.numeric_list_to_array(x) for x in v]
else:
return v
def assert_equal_with_lambda_check(self, translated, correct):
self.assertEqual(translated, correct)
self.assertEqual(from_lambda(translated), self.numeric_list_to_array(correct))
def test_none(self):
self.assert_equal_with_lambda_check(_flexible_type(None), None)
def test_date_time(self):
        d = datetime.datetime(2010, 10, 10, 10, 10, 10)
        self.assert_equal_with_lambda_check(_flexible_type(d), d)
def test_int(self):
self.assert_equal_with_lambda_check(_flexible_type(1), 1)
self.assert_equal_with_lambda_check(_flexible_type(long(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(True), 1)
self.assert_equal_with_lambda_check(_flexible_type(False), 0)
# numpy types
self.assert_equal_with_lambda_check(_flexible_type(np.int_(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.int64(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.int32(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.int16(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.uint64(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.uint32(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.uint16(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool(0)), 0)
self.assert_equal_with_lambda_check(_flexible_type(np.bool_(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool_(0)), 0)
self.assert_equal_with_lambda_check(_flexible_type(np.bool8(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool8(0)), 0)
def test_float(self):
self.assert_equal_with_lambda_check(_flexible_type(0.25), 0.25)
# numpy types
self.assert_equal_with_lambda_check(_flexible_type(np.float(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float_(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float16(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float32(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float64(0.25)), 0.25)
def test_string(self):
self.assert_equal_with_lambda_check(_flexible_type("a"), "a")
if sys.version_info.major == 2:
self.assert_equal_with_lambda_check(_flexible_type(unicode("a")), "a")
# numpy types
self.assert_equal_with_lambda_check(_flexible_type(np.string_("a")), "a")
self.assert_equal_with_lambda_check(_flexible_type(np.unicode_("a")), "a")
def test_array(self):
# float array
expected = array.array('d', [.1, .2, .3])
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
# int array
expected = array.array('d', [1, 2, 3])
self.assert_equal_with_lambda_check(_flexible_type([1, 2, 3]), expected)
self.assert_equal_with_lambda_check(_flexible_type([1.0, 2.0, 3.0]), expected)
self.assert_equal_with_lambda_check(_flexible_type([1, 2, 3.0]), expected)
# numpy ndarray
expected = np.asarray([1, 2, 3])
self.assertSequenceEqual(_flexible_type(expected), list(expected))
self.assertEquals(from_lambda(expected), array.array('d', expected))
expected = np.asarray([.1, .2, .3])
self.assertSequenceEqual(_flexible_type(expected), list(expected))
self.assertEquals(from_lambda(expected), array.array('d', expected))
def test_dict(self):
d = dt.datetime(2010, 10, 10, 10, 10, 10)
img = image.Image(current_file_dir + "/images/nested/sample_grey.jpg","JPG")
expected = {'int': 0, 'float': 0.1, 'str': 'str',
'list': ['a', 'b', 'c'], 'array': array.array('d', [1, 2, 3]),'datetime':[d],
'image': img ,'none': None}
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
self.assert_equal_with_lambda_check(_flexible_type({}), {})
expected = [{'a': 1, 'b': 20, 'c': None}, {"b": 4, None: 5}, None, {'a': 0}]
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
def test_list(self):
d = dt.datetime(2010, 10, 10, 10, 10, 10)
img = image.Image(current_file_dir + "/images/nested/sample_grey.jpg","JPG")
expected = [None, img, 1, 0.1, '1',d,array.array('d', [1, 2, 3]), {'foo': array.array('d', [1, 2,3])}]
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
self.assert_equal_with_lambda_check(_flexible_type([]), [])
self.assert_equal_with_lambda_check(_flexible_type([[], []]), [[], []])
def test_image(self):
img_gray_jpg = image.Image(current_file_dir + "/images/nested/sample_grey.jpg","JPG")
img_gray_png = image.Image(current_file_dir + "/images/nested/sample_grey.png","PNG")
img_gray_auto_jpg = image.Image(current_file_dir + "/images/nested/sample_grey.jpg")
img_gray_auto_png = image.Image(current_file_dir + "/images/nested/sample_grey.png")
img_color_jpg = image.Image(current_file_dir + "/images/sample.jpg","JPG")
img_color_png = image.Image(current_file_dir + "/images/sample.png","PNG")
img_color_auto_jpg = image.Image(current_file_dir + "/images/sample.jpg")
img_color_auto_png = image.Image(current_file_dir + "/images/sample.png")
self.assert_equal_with_lambda_check(_flexible_type(img_gray_jpg),img_gray_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_gray_png),img_gray_png)
self.assert_equal_with_lambda_check(_flexible_type(img_gray_auto_jpg),img_gray_auto_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_gray_auto_png),img_gray_png)
self.assert_equal_with_lambda_check(_flexible_type(img_color_jpg),img_color_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_color_png),img_color_png)
self.assert_equal_with_lambda_check(_flexible_type(img_color_auto_jpg),img_color_auto_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_color_auto_png),img_color_auto_png)
def test_tr_flex_list(self):
expected = []
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
# test int list
expected = [1, 2, 3, 4, 5, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, int), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, int, ignore_cast_failure=True), expected)
# test datetime list
from_zone = GMT(0)
to_zone = GMT(4.5)
d1 = dt.datetime(2010, 10, 10, 10, 10, 10).replace(tzinfo=from_zone)
d2 = d1.astimezone(to_zone)
expected = [d1,d2, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, dt.datetime), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, dt.datetime, ignore_cast_failure=True), expected)
# test image list
img_gray_auto_png = image.Image(current_file_dir + "/images/nested/sample_grey.png")
img_color_jpg = image.Image(current_file_dir + "/images/sample.jpg","JPG")
expected = [img_gray_auto_png, img_color_jpg, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, image.Image), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, image.Image, ignore_cast_failure=True), expected)
# test str list
expected = ['a', 'b', 'c', None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, str), expected)
# test array list
expected = [array.array('d', range(5)), array.array('d', range(5)), None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, array.array), expected)
expected = [[float(i) for i in range(5)], range(5), None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), [array.array('d', range(5)),
array.array('d', range(5)), None])
# test int array
expected = array.array('d', range(5))
self.assert_equal_with_lambda_check(_tr_flex_list(expected), list(range(5)))
expected = [1, 1.0, '1', [1., 1., 1.], ['a', 'b', 'c'], {}, {'a': 1}, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected, int, ignore_cast_failure=True), [1, 1, None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, float, ignore_cast_failure=True), [1.0, 1.0, None])
# Anything can be cast to a string
# self.assert_equal_with_lambda_check(_tr_flex_list(expected, str, ignore_cast_failure=True), ['1', '1', None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, array.array, ignore_cast_failure=True), [array.array('d', [1., 1., 1.]), None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, list, ignore_cast_failure=True),
[[1., 1., 1.], ['a', 'b', 'c'], None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, dict, ignore_cast_failure=True),
[{}, {'a': 1}, None])
def test_infer_list_type(self):
        self.assertEquals(infer_type_of_list(
            [image.Image(current_file_dir + "/images/nested/sample_grey.png"),
             image.Image(current_file_dir + "/images/sample.jpg", "JPG"),
             image.Image(current_file_dir + "/images/sample.png")]), image.Image)
self.assertEquals(infer_type_of_list([dt.datetime(2010, 10, 10, 10, 10, 10), dt.datetime(2000, 5, 7, 10, 4, 10),dt.datetime(1845, 5, 7, 4, 4, 10)]), dt.datetime)
self.assertEquals(infer_type_of_list([0, 1, 2]), int)
self.assertEquals(infer_type_of_list([0, 1, 2.0]), float)
self.assertEquals(infer_type_of_list(['foo', u'bar']), str)
self.assertEquals(infer_type_of_list([array.array('d', [1, 2, 3]), array.array('d', [1, 2, 3])]), array.array)
self.assertEquals(infer_type_of_list([[], [1.0, 2.0, 3.0], array.array('d', [1, 2, 3])]), array.array)
self.assertEquals(infer_type_of_list([[], [1, 2, 3], array.array('d', [1, 2, 3])]), array.array)
self.assertEquals(infer_type_of_list([{'a': 1}, {'b': 2}]), dict)
def test_datetime_lambda(self):
d = dt.datetime.now()
sa = SArray([d])
# Lambda returning self
sa_self = sa.apply(lambda x: x)
for i in range(len(sa_self)):
self.assertEqual(sa[i], sa_self[i])
# Lambda returning year
sa_year = sa.apply(lambda x: x.year)
for i in range(len(sa_year)):
self.assertEqual(sa[i].year, sa_year[i])
# Lambda returning second
sa_sec = sa.apply(lambda x: x.second)
for i in range(len(sa_sec)):
self.assertEqual(sa[i].second, sa_sec[i])
def test_flexible_type_hint(self):
_check_ft_pyobject_hint_path(1, int)
_check_ft_pyobject_hint_path(1, float)
_check_ft_pyobject_hint_path(1.5, float)
_check_ft_pyobject_hint_path([], list)
_check_ft_pyobject_hint_path([1], list)
_check_ft_pyobject_hint_path((1,2), list)
_check_ft_pyobject_hint_path({1:1}, dict)
_check_ft_pyobject_hint_path(array.array('i', [1,2]), array.array)
_check_ft_pyobject_hint_path(array.array('d', [1,2]), array.array)
def test_pytype_from_type_name(self):
self.assertEquals(pytype_from_type_name("str"), str)
self.assertEquals(pytype_from_type_name("string"), str)
self.assertEquals(pytype_from_type_name("float"), float)
self.assertEquals(pytype_from_type_name("datetime"), datetime.datetime)
self.assertEquals(pytype_from_type_name("image"), image.Image)
self.assertEquals(pytype_from_type_name("list"), list)
self.assertEquals(pytype_from_type_name("undefined"), type(None))
self.assertRaises(ValueError, lambda: pytype_from_type_name("happiness"))
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide Python descriptors for delegating to Bokeh properties.
The Python `descriptor protocol`_ allows fine-grained control over all
attribute access on instances ("You control the dot"). Bokeh uses the
descriptor protocol to provide easy-to-use, declarative, type-based
class properties that can automatically validate and serialize their
values, as well as help provide sophisticated documentation.
A Bokeh property really consists of two parts: a familiar "property"
portion, such as ``Int``, ``String``, etc., as well as an associated
Python descriptor that delegates attribute access to the property instance.
For example, a very simplified definition of a range-like object might
be:
.. code-block:: python
from bokeh.model import Model
from bokeh.core.properties import Float
class Range(Model):
start = Float(help="start point")
end = Float(help="end point")
When this class is created, the ``MetaHasProps`` metaclass wires up both
the ``start`` and ``end`` attributes to a ``Float`` property. Then, when
a user accesses those attributes, the descriptor delegates all get and
set operations to the ``Float`` property.
.. code-block:: python
rng = Range()
# The descriptor __set__ method delegates to Float, which can validate
# the value 10.3 as a valid floating point value
rng.start = 10.3
# But can raise a validation exception if an attempt to set to a list
# is made
rng.end = [1,2,3] # ValueError !
More sophisticated properties such as ``DataSpec`` and its subclasses can
exert control over how values are serialized. Consider this example with
the ``Circle`` glyph and its ``x`` attribute that is a ``NumberSpec``:
.. code-block:: python
from bokeh.models import Circle
c = Circle()
c.x = 10 # serializes to {'value': 10}
c.x = 'foo' # serializes to {'field': 'foo'}
There are many other examples like this throughout Bokeh. In this way users
may operate simply and naturally, and not be concerned with the low-level
details around validation, serialization, and documentation.
This module provides the class |PropertyDescriptor| and various subclasses
that can be used to attach Bokeh properties to Bokeh models.
.. note::
These classes form part of the very low-level machinery that implements
the Bokeh model and property system. It is unlikely that any of these
classes or their methods will be applicable to any standard usage or to
anyone who is not directly developing on Bokeh's own infrastructure.
.. _descriptor protocol: https://docs.python.org/3/howto/descriptor.html
'''
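# --- Illustrative sketch (editor's addition, not part of Bokeh) -------------
# A minimal, dependency-free data descriptor showing the delegation pattern
# the docstring above describes: validation on ``__set__``, per-instance
# storage, and class attribute access returning the descriptor itself. All
# names here (``FloatDescriptor``, ``_SketchRange``) are hypothetical.
def _sketch_descriptor_delegation():
    class FloatDescriptor(object):
        def __init__(self, name):
            self.name = name
        def __get__(self, obj, owner):
            if obj is None:
                return self  # class attribute access -> the descriptor
            return obj.__dict__.get(self.name, 0.0)
        def __set__(self, obj, value):
            # validate, roughly as a Float property would
            if not isinstance(value, (int, float)):
                raise ValueError("expected a number for %r" % self.name)
            obj.__dict__[self.name] = float(value)

    class _SketchRange(object):
        start = FloatDescriptor('start')
        end = FloatDescriptor('end')

    rng = _SketchRange()
    rng.start = 10.3                            # validated and stored
    assert rng.start == 10.3
    assert isinstance(_SketchRange.start, FloatDescriptor)
    try:
        rng.end = [1, 2, 3]                     # rejected, as in the docstring
    except ValueError:
        pass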
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from copy import copy
# Bokeh imports
from .wrappers import PropertyValueColumnData, PropertyValueContainer
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'BasicPropertyDescriptor',
'ColumnDataPropertyDescriptor',
'DataSpecPropertyDescriptor',
'PropertyDescriptor',
'UnitsSpecPropertyDescriptor',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class PropertyDescriptor(object):
''' Base class for a python descriptor that delegates access for a named
attribute to a Bokeh |Property| instance.
'''
def __init__(self, name):
''' Create a descriptor for hooking up a named Bokeh property
as an attribute on a |HasProps| class.
Args:
name (str) : the attribute name that this descriptor is for
'''
self.name = name
def __str__(self):
''' Basic string representation of ``PropertyDescriptor``.
**Subclasses must implement this to serve their specific needs.**
'''
return "PropertyDescriptor(%s)" % (self.name)
def __get__(self, obj, owner):
''' Implement the getter for the Python `descriptor protocol`_.
Args:
obj (HasProps or None) :
The instance to get the property value from (for instance
attribute access), or None (for class attribute access)
owner (class) :
The class that owns this descriptor
Returns:
None
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement __get__")
def __set__(self, obj, value, setter=None):
''' Implement the setter for the Python `descriptor protocol`_.
.. note::
An optional argument ``setter`` has been added to the standard
setter arguments. When needed, this value should be provided by
explicitly invoking ``__set__``. See below for more information.
Args:
obj (HasProps) :
The instance to set a new property value on
value (obj) :
The new value to set the property to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement __set__")
def __delete__(self, obj):
''' Implement the deleter for the Python `descriptor protocol`_.
Args:
obj (HasProps) : An instance to delete this property from
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement __delete__")
# This would probably be cleaner with some form of multiple dispatch
# on (descriptor, property). Currently this method has to know about
# various other classes, and that is annoying.
def add_prop_descriptor_to_class(self, class_name, new_class_attrs, names_with_refs, container_names, dataspecs):
''' ``MetaHasProps`` calls this during class creation as it iterates
over properties to add, to update its registry of new properties.
The parameters passed in are mutable and this function is expected to
update them accordingly.
Args:
class_name (str) :
name of the class this descriptor is added to
new_class_attrs(dict[str, PropertyDescriptor]) :
mapping of attribute names to PropertyDescriptor that this
function will update
names_with_refs (set[str]) :
set of all property names for properties that also have
references, that this function will update
container_names (set[str]) :
set of all property names for properties that are
container props, that this function will update
dataspecs(dict[str, PropertyDescriptor]) :
mapping of attribute names to PropertyDescriptor for DataSpec
properties that this function will update
Returns:
None
'''
from .bases import ContainerProperty
from .dataspec import DataSpec
name = self.name
if name in new_class_attrs:
raise RuntimeError("Two property generators both created %s.%s" % (class_name, name))
new_class_attrs[name] = self
if self.has_ref:
names_with_refs.add(name)
if isinstance(self, BasicPropertyDescriptor):
if isinstance(self.property, ContainerProperty):
container_names.add(name)
if isinstance(self.property, DataSpec):
dataspecs[name] = self
def class_default(self, cls):
''' The default as computed for a certain class, ignoring any
per-instance theming.
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement class_default()")
def serializable_value(self, obj):
''' Produce the value as it should be serialized.
Sometimes it is desirable for the serialized value to differ from
the ``__get__`` in order for the ``__get__`` value to appear simpler
for user or developer convenience.
Args:
obj (HasProps) : the object to get the serialized attribute for
Returns:
JSON-like
'''
value = self.__get__(obj, obj.__class__)
return self.property.serialize_value(value)
def set_from_json(self, obj, json, models=None, setter=None):
'''Sets the value of this property from a JSON value.
Args:
obj: (HasProps) : instance to set the property value on
json: (JSON-value) : value to set to the attribute to
models (dict or None, optional) :
Mapping of model ids to models (default: None)
This is needed in cases where the attributes to update also
have values that have references.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
self._internal_set(obj, json, setter=setter)
def trigger_if_changed(self, obj, old):
''' Send a change event notification if the property is set to a
value that is not equal to ``old``.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property to compare
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement trigger_if_changed()")
@property
def has_ref(self):
''' Whether the property can refer to another ``HasProps`` instance.
This is typically True for container properties, ``Instance``, etc.
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement has_ref()")
@property
def readonly(self):
''' Whether this property is read-only.
Read-only properties may only be modified by the client (i.e., by
BokehJS, in the browser). Read only properties are useful for
quantities that originate or that can only be computed in the
browser, for instance the "inner" plot dimensions of a plot area,
which depend on the current layout state. It is useful for Python
callbacks to be able to know these values, but they can only be
computed in the actual browser.
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement readonly()")
@property
def serialized(self):
''' Whether the property should be serialized when serializing
an object.
This would be False for a "virtual" or "convenience" property that
duplicates information already available in other properties, for
example.
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement serialized()")
def _internal_set(self, obj, value, hint=None, setter=None):
''' Internal implementation to set property values, that is used
by __set__, set_from_json, etc.
Args:
obj (HasProps)
The object the property is being set on.
value (obj) :
The new value to set the property to
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Raises:
NotImplementedError
**Subclasses must implement this to serve their specific needs.**
'''
raise NotImplementedError("Implement _internal_set()")
class BasicPropertyDescriptor(PropertyDescriptor):
''' A ``PropertyDescriptor`` for basic Bokeh properties (e.g, ``Int``,
``String``, ``Float``, etc.) with simple get/set and serialization
behavior.
'''
def __init__(self, name, property):
''' Create a PropertyDescriptor for basic Bokeh properties.
Args:
name (str) : The attribute name that this property is for
property (Property) : A basic property to create a descriptor for
'''
super().__init__(name)
self.property = property
self.__doc__ = self.property.__doc__
def __str__(self):
''' Basic string representation of ``BasicPropertyDescriptor``.
Delegates to ``self.property.__str__``
'''
return "%s" % self.property
def __get__(self, obj, owner):
''' Implement the getter for the Python `descriptor protocol`_.
For instance attribute access, we delegate to the |Property|. For
class attribute access, we return ourself.
Args:
obj (HasProps or None) :
The instance to get the property value from (for instance
attribute access), or None (for class attribute access)
owner (class) :
The class that owns this descriptor
Returns:
The property value (for instance access), or this descriptor
(for class access)
Examples:
.. code-block:: python
>>> from bokeh.models import Range1d
>>> r = Range1d(start=10, end=20)
# instance attribute access, returns the property value
>>> r.start
10
# class attribute access, returns the property descriptor
>>> Range1d.start
<bokeh.core.property.descriptors.BasicPropertyDescriptor at 0x1148b3390>
'''
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
# This should really never happen. If it does, it means we've
# called __get__ explicitly but in a bad way.
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def __set__(self, obj, value, setter=None):
''' Implement the setter for the Python `descriptor protocol`_.
.. note::
An optional argument ``setter`` has been added to the standard
setter arguments. When needed, this value should be provided by
explicitly invoking ``__set__``. See below for more information.
Args:
obj (HasProps) :
The instance to set a new property value on
value (obj) :
The new value to set the property to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
if not hasattr(obj, '_property_values'):
# Initial values should be passed in to __init__, not set directly
raise RuntimeError("Cannot set a property value '%s' on a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.property._readonly:
raise RuntimeError("%s.%s is a readonly property" % (obj.__class__.__name__, self.name))
self._internal_set(obj, value, setter=setter)
def __delete__(self, obj):
''' Implement the deleter for the Python `descriptor protocol`_.
Args:
obj (HasProps) : An instance to delete this property from
'''
if self.name in obj._property_values:
old_value = obj._property_values[self.name]
del obj._property_values[self.name]
self.trigger_if_changed(obj, old_value)
if self.name in obj._unstable_default_values:
del obj._unstable_default_values[self.name]
def class_default(self, cls):
''' Get the default value for a specific subtype of ``HasProps``,
which may not be used for an individual instance.
Args:
cls (class) : The class to get the default value for.
Returns:
object
'''
return self.property.themed_default(cls, self.name, None)
def instance_default(self, obj):
''' Get the default value that will be used for a specific instance.
Args:
obj (HasProps) : The instance to get the default value for.
Returns:
object
'''
return self.property.themed_default(obj.__class__, self.name, obj.themed_values())
def set_from_json(self, obj, json, models=None, setter=None):
''' Sets the value of this property from a JSON value.
This method first converts the JSON value into a Python value with
the property's ``from_json`` method, then delegates to the superclass
``set_from_json`` to perform the update.
Args:
obj (HasProps) :
json (JSON-dict) :
models(seq[Model], optional) :
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
return super().set_from_json(obj,
self.property.from_json(json, models),
models, setter)
def trigger_if_changed(self, obj, old):
''' Send a change event notification if the property is set to a
value that is not equal to ``old``.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property to compare
Returns:
None
'''
new_value = self.__get__(obj, obj.__class__)
if not self.property.matches(old, new_value):
self._trigger(obj, old, new_value)
@property
def has_ref(self):
''' Whether the property can refer to another ``HasProps`` instance.
For basic properties, delegate to the ``has_ref`` attribute on the
|Property|.
'''
return self.property.has_ref
@property
def readonly(self):
''' Whether this property is read-only.
Read-only properties may only be modified by the client (i.e., by BokehJS
in the browser).
'''
return self.property.readonly
@property
def serialized(self):
''' Whether the property should be serialized when serializing an
object.
This would be False for a "virtual" or "convenience" property that
duplicates information already available in other properties, for
example.
'''
return self.property.serialized
def _get(self, obj):
''' Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
'''
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name]
def _get_default(self, obj):
''' Internal implementation of instance attribute access for default
values.
Handles bookkeeping around |PropertyContainer| values, etc.
'''
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
default = self.instance_default(obj)
if is_themed:
unstable_dict = obj._unstable_themed_values
else:
unstable_dict = obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name]
if self.property._may_have_unstable_default():
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self)
unstable_dict[self.name] = default
return default
def _internal_set(self, obj, value, hint=None, setter=None):
''' Internal implementation to set property values, that is used
by __set__, set_from_json, etc.
Delegate to the |Property| instance to prepare the value appropriately,
then set it.
Args:
obj (HasProps)
The object the property is being set on.
value (obj) :
The new value to set the property to
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
value = self.property.prepare_value(obj, self.name, value)
old = self.__get__(obj, obj.__class__)
self._real_set(obj, old, value, hint=hint, setter=setter)
def _real_set(self, obj, old, value, hint=None, setter=None):
''' Internal implementation helper to set property values.
This function handles bookkeeping around noting whether values have
been explicitly set, etc.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property to compare
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
# Normally we want a "no-op" if the new value and old value are identical
# but some hinted events are in-place. This check will allow those cases
# to continue on to the notification machinery
if self.property.matches(value, old) and (hint is None):
return
was_set = self.name in obj._property_values
# "old" is the logical old value, but it may not be the actual current
# attribute value if our value was mutated behind our back and we got
# _notify_mutated.
if was_set:
old_attr_value = obj._property_values[self.name]
else:
old_attr_value = old
if old_attr_value is not value:
if isinstance(old_attr_value, PropertyValueContainer):
old_attr_value._unregister_owner(obj, self)
if isinstance(value, PropertyValueContainer):
value._register_owner(obj, self)
if self.name in obj._unstable_themed_values:
del obj._unstable_themed_values[self.name]
if self.name in obj._unstable_default_values:
del obj._unstable_default_values[self.name]
obj._property_values[self.name] = value
# for notification purposes, "old" should be the logical old
self._trigger(obj, old, value, hint=hint, setter=setter)
# called when a container is mutated "behind our back" and
# we detect it with our collection wrappers.
def _notify_mutated(self, obj, old, hint=None):
''' A method to call when a container is mutated "behind our back"
and we detect it with our |PropertyContainer| wrappers.
Args:
obj (HasProps) :
The object whose container value was mutated
old (object) :
The "old" value of the container
In this case, somewhat weirdly, ``old`` is a copy and the
new value should already be set unless we change it due to
validation.
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
Returns:
None
'''
value = self.__get__(obj, obj.__class__)
# re-validate because the contents of 'old' have changed,
# in some cases this could give us a new object for the value
value = self.property.prepare_value(obj, self.name, value)
self._real_set(obj, old, value, hint=hint)
def _trigger(self, obj, old, value, hint=None, setter=None):
''' Unconditionally send a change event notification for the property.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property
value (obj) :
The new value of the property
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
if hasattr(obj, 'trigger'):
obj.trigger(self.name, old, value, hint, setter)
_CDS_SET_FROM_CDS_ERROR = """
ColumnDataSource.data properties may only be set from plain Python dicts,
not other ColumnDataSource.data values.
If you need to copy data from one CDS to another, make a shallow copy by
calling dict: s1.data = dict(s2.data)
"""
class ColumnDataPropertyDescriptor(BasicPropertyDescriptor):
''' A ``PropertyDescriptor`` specialized to handling ``ColumnData`` properties.
'''
def __set__(self, obj, value, setter=None):
''' Implement the setter for the Python `descriptor protocol`_.
When the object is attached to a document, this method wraps the update
with a ``ColumnDataChangedEvent`` hint so that column data updates can
be handled efficiently. Setting ``.data`` directly from another
``ColumnDataSource.data`` value is rejected with a ``ValueError``.
.. note::
An optional argument ``setter`` has been added to the standard
setter arguments. When needed, this value should be provided by
explicitly invoking ``__set__``. See below for more information.
Args:
obj (HasProps) :
The instance to set a new property value on
value (obj) :
The new value to set the property to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
if not hasattr(obj, '_property_values'):
# Initial values should be passed in to __init__, not set directly
raise RuntimeError("Cannot set a property value '%s' on a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.property._readonly:
raise RuntimeError("%s.%s is a readonly property" % (obj.__class__.__name__, self.name))
if isinstance(value, PropertyValueColumnData):
raise ValueError(_CDS_SET_FROM_CDS_ERROR)
from ...document.events import ColumnDataChangedEvent
if obj.document:
hint = ColumnDataChangedEvent(obj.document, obj, setter=setter)
else:
hint = None
self._internal_set(obj, value, hint=hint, setter=setter)
class DataSpecPropertyDescriptor(BasicPropertyDescriptor):
''' A ``PropertyDescriptor`` for Bokeh |DataSpec| properties that serialize to
field/value dictionaries.
'''
def serializable_value(self, obj):
''' Produce the value as it should be serialized, delegating to the
|DataSpec| property's ``to_serializable`` method to produce a
field/value dictionary.
'''
return self.property.to_serializable(obj, self.name, getattr(obj, self.name))
def set_from_json(self, obj, json, models=None, setter=None):
''' Sets the value of this property from a JSON value.
This method first checks whether the JSON value can keep the same
"format" (string, dict, or number) as the current value, unwrapping
``value`` or ``field`` keys when appropriate, before delegating to
the superclass ``set_from_json``.
Args:
obj (HasProps) :
json (JSON-dict) :
models(seq[Model], optional) :
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
if isinstance(json, dict):
# we want to try to keep the "format" of the data spec as string, dict, or number,
# assuming the serialized dict is compatible with that.
old = getattr(obj, self.name)
if old is not None:
try:
self.property._type.validate(old, False)
if 'value' in json:
json = json['value']
except ValueError:
if isinstance(old, str) and 'field' in json:
json = json['field']
# leave it as a dict if 'old' was a dict
super().set_from_json(obj, json, models, setter)
class UnitsSpecPropertyDescriptor(DataSpecPropertyDescriptor):
''' A ``PropertyDescriptor`` for Bokeh |UnitsSpec| properties that contribute
associated ``_units`` properties automatically as a side effect.
'''
def __init__(self, name, property, units_property):
'''
Args:
name (str) :
The attribute name that this property is for
property (Property) :
A basic property to create a descriptor for
units_property (Property) :
An associated property to hold units information
'''
super().__init__(name, property)
self.units_prop = units_property
def __set__(self, obj, value, setter=None):
''' Implement the setter for the Python `descriptor protocol`_.
This method first separately extracts and removes any ``units`` field
in the JSON, and sets the associated units property directly. The
remaining value is then passed to the superclass ``__set__`` to
be handled.
.. note::
An optional argument ``setter`` has been added to the standard
setter arguments. When needed, this value should be provided by
explicitly invoking ``__set__``. See below for more information.
Args:
obj (HasProps) :
The instance to set a new property value on
value (obj) :
The new value to set the property to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
value = self._extract_units(obj, value)
super().__set__(obj, value, setter)
def set_from_json(self, obj, json, models=None, setter=None):
''' Sets the value of this property from a JSON value.
This method first separately extracts and removes any ``units`` field
in the JSON, and sets the associated units property directly. The
remaining JSON is then passed to the superclass ``set_from_json`` to
be handled.
Args:
obj: (HasProps) : instance to set the property value on
json: (JSON-value) : value to set to the attribute to
models (dict or None, optional) :
Mapping of model ids to models (default: None)
This is needed in cases where the attributes to update also
have values that have references.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
'''
json = self._extract_units(obj, json)
super().set_from_json(obj, json, models, setter)
def _extract_units(self, obj, value):
''' Internal helper for dealing with associated units properties
when setting values on |UnitsSpec| properties.
When ``value`` is a dict, this function may mutate the value of the
associated units property.
Args:
obj (HasProps) : instance to update units spec property value for
value (obj) : new value to set for the property
Returns:
copy of ``value``, with 'units' key and value removed when
applicable
'''
if isinstance(value, dict):
if 'units' in value:
value = copy(value) # so we can modify it
units = value.pop("units", None)
if units:
self.units_prop.__set__(obj, units)
return value
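# --- Illustrative sketch (editor's addition, not part of Bokeh) -------------
# The dict handling in ``_extract_units`` in isolation: the 'units' key is
# popped from a *copy*, routed to the companion units property, and the
# remainder flows on to the normal |DataSpec| handling.
def _sketch_extract_units_shape():
    value = {'field': 'height', 'units': 'screen'}
    stripped = dict(value)              # copy, so the caller's dict is untouched
    units = stripped.pop('units', None)
    assert units == 'screen'            # would be set on the units property
    assert stripped == {'field': 'height'}
    assert value == {'field': 'height', 'units': 'screen'}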
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import json
import os
from mock import patch
from flexget import __version__
from flexget.api import __version__ as __api_version__
from flexget.manager import Manager
from tests.conftest import MockManager
class TestValidator(object):
config = '{tasks: {}}'
def test_invalid_payload(self, api_client):
new_task = {
'name': 'new_task',
'config': {
'invalid_plugin': [{'title': 'entry 1'}],
'fake_plugin2': {'url': 'http://test/rss'}
}
}
rsp = api_client.json_post('/tasks/', data=json.dumps(new_task))
assert rsp.status_code == 400
data = json.loads(rsp.get_data(as_text=True))
assert data.get('code') == 400
assert data.get('message') == 'validation error'
assert data.get('validation_errors')
assert 'The keys' in data['validation_errors'][0]['message']
assert 'invalid_plugin' in data['validation_errors'][0]['message']
assert 'fake_plugin2' in data['validation_errors'][0]['message']
class TestServerAPI(object):
config = """
tasks:
test:
rss:
url: http://test/rss
mock:
- title: entry 1
"""
def test_pid(self, api_client):
rsp = api_client.get('/server/pid/', headers={})
assert rsp.status_code == 200
assert json.loads(rsp.get_data(as_text=True)) == {'pid': os.getpid()}
@patch.object(MockManager, 'load_config')
def test_reload(self, mocked_load_config, api_client):
rsp = api_client.get('/server/reload/')
assert rsp.status_code == 200
assert mocked_load_config.called
@patch.object(Manager, 'shutdown')
def test_shutdown(self, mocked_shutdown, api_client):
api_client.get('/server/shutdown/')
assert mocked_shutdown.called
def test_get_config(self, api_client):
rsp = api_client.get('/server/config/')
assert rsp.status_code == 200
assert json.loads(rsp.get_data(as_text=True)) == {
'tasks': {
'test': {
'mock': [{'title': 'entry 1'}],
'rss': {
'url': u'http://test/rss',
'group_links': False,
'ascii': False,
'silent': False,
'all_entries': True
}
}
}
}
def test_version(self, api_client):
rsp = api_client.get('/server/version/')
assert rsp.status_code == 200
assert json.loads(rsp.get_data(as_text=True)) == {'flexget_version': __version__, 'api_version': __api_version__}
class TestTaskAPI(object):
config = """
tasks:
test:
rss:
url: http://test/rss
mock:
- title: entry 1
"""
def test_list_tasks(self, api_client):
rsp = api_client.get('/tasks/')
data = json.loads(rsp.get_data(as_text=True))
assert data == {
'tasks': [
{
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {
'url': u'http://test/rss',
'group_links': False,
'ascii': False,
'silent': False,
'all_entries': True
}
},
}
]
}
@patch.object(Manager, 'save_config')
def test_add_task(self, mocked_save_config, api_client, manager):
new_task = {
'name': 'new_task',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://test/rss'}
}
}
rsp = api_client.json_post('/tasks/', data=json.dumps(new_task))
assert rsp.status_code == 201
assert mocked_save_config.called
assert json.loads(rsp.get_data(as_text=True)) == new_task
assert manager.user_config['tasks']['new_task'] == new_task['config']
assert manager.config['tasks']['new_task'] == new_task['config']
def test_add_task_existing(self, api_client):
new_task = {
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}]
}
}
rsp = api_client.json_post('/tasks/', data=json.dumps(new_task))
assert rsp.status_code == 409
def test_get_task(self, api_client):
rsp = api_client.get('/tasks/test/')
data = json.loads(rsp.get_data(as_text=True))
assert data == {
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {
'url': u'http://test/rss',
'group_links': False,
'ascii': False,
'silent': False,
'all_entries': True
}
},
}
@patch.object(Manager, 'save_config')
def test_update_task(self, mocked_save_config, api_client, manager):
updated_task = {
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://newurl/rss'}
}
}
rsp = api_client.json_put('/tasks/test/', data=json.dumps(updated_task))
assert rsp.status_code == 200
assert mocked_save_config.called
assert json.loads(rsp.get_data(as_text=True)) == updated_task
assert manager.user_config['tasks']['test'] == updated_task['config']
assert manager.config['tasks']['test'] == updated_task['config']
@patch.object(Manager, 'save_config')
def test_rename_task(self, mocked_save_config, api_client, manager):
updated_task = {
'name': 'new_test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://newurl/rss'}
}
}
rsp = api_client.json_put('/tasks/test/', data=json.dumps(updated_task))
assert rsp.status_code == 201
assert mocked_save_config.called
assert json.loads(rsp.get_data(as_text=True)) == updated_task
assert 'test' not in manager.user_config['tasks']
assert 'test' not in manager.config['tasks']
assert manager.user_config['tasks']['new_test'] == updated_task['config']
assert manager.config['tasks']['new_test'] == updated_task['config']
@patch.object(Manager, 'save_config')
def test_delete_task(self, mocked_save_config, api_client, manager):
rsp = api_client.delete('/tasks/test/')
assert rsp.status_code == 200
assert mocked_save_config.called
assert 'test' not in manager.user_config['tasks']
assert 'test' not in manager.config['tasks']
class TestExecuteAPI(object):
@staticmethod
def get_task_queue(manager):
""" Used to execute task queue"""
assert len(manager.task_queue) == 1
task = manager.task_queue.run_queue.get(timeout=0.5)
assert task
return task
config = """
tasks:
test_task:
mock:
- title: accept_me
- title: reject_me
regexp:
accept:
- accept
reject:
- reject
"""
def test_execute(self, api_client, manager):
# Minimal payload
payload = {'tasks': ['test_task']}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.accepted) == 1
def test_inject_plain(self, api_client, manager):
entry = {
'title': "injected",
'url': 'http://test.com'
}
payload = {
"inject": [entry],
'tasks': ['test_task']
}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
assert len(task.accepted) == 0
def test_inject_accept(self, api_client, manager):
entry = {
'title': "injected",
'url': 'http://test.com',
'accept': True,
'tasks': ['test_task']
}
payload = {
"inject": [entry],
'tasks': ['test_task']
}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
assert len(task.accepted) == 1
def test_inject_force(self, api_client, manager):
entry = {
'title': "accept",
'url': 'http://test.com',
}
payload = {
"inject": [entry],
'tasks': ['test_task']
}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
assert len(task.accepted) == 1
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
# Rejected due to Seen
assert len(task.accepted) == 0
# Forcing the entry not to be disabled
entry['force'] = True
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
assert len(task.accepted) == 1
def test_inject_with_fields(self, api_client, manager):
fields = {'imdb_id': "tt1234567",
'tmdb_id': "1234567"}
entry = {
'title': "injected",
'url': 'http://test.com',
'fields': fields,
'accept': True
}
payload = {
"inject": [entry],
'tasks': ['test_task']
}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
assert len(task.accepted) == 1
entry = task.find_entry(title='injected')
assert entry['imdb_id'] == "tt1234567"
assert entry['tmdb_id'] == "1234567"
def test_multiple_entries(self, api_client, manager):
entry1 = {
'title': "entry1",
'url': 'http://test.com',
'accept': True
}
entry2 = {
'title': "entry2",
'url': 'http://test.com',
'accept': True
}
payload = {
"inject": [entry1, entry2],
'tasks': ['test_task']
}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 2
assert len(task.accepted) == 2
def test_2nd_endpoint(self, api_client, manager):
entry = {
'title': "injected",
'url': 'http://test.com',
'accept': True
}
payload = {
"inject": [entry],
'tasks': ['test_task']
}
rsp = api_client.json_post('/inject/', data=json.dumps(payload))
assert rsp.status_code == 200
task = self.get_task_queue(manager)
task.execute()
assert len(task.all_entries) == 1
assert len(task.accepted) == 1
class TestExecuteMultipleTasks(object):
config = """
tasks:
test_task1:
mock:
- title: accept_me1
accept_all: yes
test_task2:
mock:
- title: accept_me2
accept_all: yes
"""
def test_execute_multiple_tasks(self, api_client, manager):
rsp = api_client.json_post('/tasks/execute/', data=json.dumps({}))
assert rsp.status_code == 400
payload = {'tasks': ['non_existing_test_task']}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 404
payload = {'tasks': ['test_task1', 'test_task2']}
rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
assert rsp.status_code == 200
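# --- Illustrative sketch (editor's addition) ---------------------------------
# Outside the test harness, the same execute endpoint can be driven with any
# HTTP client. A hedged sketch; the host, port, and URL prefix are
# assumptions that depend on how the Flexget daemon is configured:
#
#     import json
#     import requests
#     payload = {'tasks': ['test_task1', 'test_task2']}
#     rsp = requests.post('http://localhost:5050/api/tasks/execute/',
#                         data=json.dumps(payload),
#                         headers={'Content-Type': 'application/json'})
#     assert rsp.status_code == 200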
|
|
# *-* coding: utf-8 *-*
# This file is part of butterfly
#
# butterfly Copyright (C) 2014 Florian Mounier
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pty
import os
import io
import struct
import string
import random
import fcntl
import termios
import tornado.web
import tornado.websocket
import tornado.process
import tornado.ioloop
import tornado.options
import sys
import signal
from butterfly import url, Route, utils, __version__
ioloop = tornado.ioloop.IOLoop.instance()
server = utils.User()
daemon = utils.User(name='daemon')
# Python 2 backward compatibility
try:
input = raw_input
except NameError:
pass
def u(s):
if sys.version_info[0] == 2:
return s.decode('utf-8')
return s
@url(r'/(?:user/(.+))?/?(?:wd/(.+))?')
class Index(Route):
def get(self, user, path):
if not tornado.options.options.unsecure and user:
raise tornado.web.HTTPError(400)
return self.render('index.html')
@url(r'/style.css')
class Style(Route):
def get(self):
default_style = os.path.join(
os.path.dirname(__file__), 'static', 'main.css')
self.log.info('Getting style')
css = utils.get_style()
self.log.debug('Style ok')
self.set_header("Content-Type", "text/css")
if css:
self.write(css)
else:
with open(default_style) as s:
while True:
data = s.read(16384)
if data:
self.write(data)
else:
break
self.finish()
@url(r'/ws(?:/user/([^/]+))?/?(?:/wd/(.+))?')
class TermWebSocket(Route, tornado.websocket.WebSocketHandler):
terminals = set()
def pty(self):
# Make a "unique" id in 4 bytes
self.uid = ''.join(
random.choice(
string.ascii_lowercase + string.ascii_uppercase +
string.digits)
for _ in range(4))
self.pid, self.fd = pty.fork()
if self.pid == 0:
self.determine_user()
self.shell()
else:
self.communicate()
def determine_user(self):
if self.callee is None and (
tornado.options.options.unsecure and
tornado.options.options.login):
# If callee is not known and we have an unsecure connection
user = ''
while user == '':
try:
user = input('login: ')
except (KeyboardInterrupt, EOFError):
self.log.debug("Errorin login input", exc_info=True)
pass
try:
self.callee = utils.User(name=user)
except Exception:
self.log.debug("Can't switch to user %s" % user, exc_info=True)
self.callee = utils.User(name='nobody')
elif (tornado.options.options.unsecure and not
tornado.options.options.login):
# if login is not required, we will use the same user as
# butterfly is executed
self.callee = utils.User()
assert self.callee is not None
def shell(self):
try:
os.chdir(self.path or self.callee.dir)
except Exception:
self.log.debug(
"Can't chdir to %s" % (self.path or self.callee.dir),
exc_info=True)
env = os.environ
# If local and local user is the same as login user
# We set the env of the user from the browser
# Useful when running as root
if self.caller == self.callee:
env.update(self.socket.env)
env["TERM"] = "xterm-256color"
env["COLORTERM"] = "butterfly"
env["HOME"] = self.callee.dir
env["LOCATION"] = "http%s://%s:%d/" % (
"s" if not tornado.options.options.unsecure else "",
tornado.options.options.host, tornado.options.options.port)
env["PATH"] = '%s:%s' % (os.path.abspath(os.path.join(
os.path.dirname(__file__), 'bin')), env.get("PATH"))
try:
tty = os.ttyname(0).replace('/dev/', '')
except Exception:
self.log.debug("Can't get ttyname", exc_info=True)
tty = ''
if self.caller != self.callee:
try:
os.chown(os.ttyname(0), self.callee.uid, -1)
except Exception:
self.log.debug("Can't chown ttyname", exc_info=True)
utils.add_user_info(
self.uid,
tty, os.getpid(),
self.callee.name, self.request.headers['Host'])
if not tornado.options.options.unsecure or (
self.socket.local and
self.caller == self.callee and
server == self.callee
) or not tornado.options.options.login:
# User has been authenticated with SSL, or is the same user as the
# server, or login is explicitly turned off
if (
not tornado.options.options.unsecure and
tornado.options.options.login and not (
self.socket.local and
self.caller == self.callee and
server == self.callee
)):
# User is authed by ssl, setting groups
try:
os.initgroups(self.callee.name, self.callee.gid)
os.setgid(self.callee.gid)
os.setuid(self.callee.uid)
except Exception:
self.log.error(
'The server must be run as root '
'if you want to log as different user\n',
exc_info=True)
sys.exit(1)
if tornado.options.options.cmd:
args = tornado.options.options.cmd.split(' ')
else:
args = [tornado.options.options.shell or self.callee.shell]
args.append('-i')
os.execvpe(args[0], args, env)
# This process has been replaced
# Unsecure connection with su
if server.root:
if self.socket.local:
if self.callee != self.caller:
# Force password prompt by dropping rights
# to the daemon user
os.setuid(daemon.uid)
else:
# We are not local so we should always get a password prompt
if self.callee == daemon:
# No logging in as the daemon user
sys.exit(1)
os.setuid(daemon.uid)
if os.path.exists('/usr/bin/su'):
args = ['/usr/bin/su']
else:
args = ['/bin/su']
if sys.platform == 'linux':
args.append('-p')
if tornado.options.options.shell:
args.append('-s')
args.append(tornado.options.options.shell)
args.append(self.callee.name)
os.execvpe(args[0], args, env)
def communicate(self):
fcntl.fcntl(self.fd, fcntl.F_SETFL, os.O_NONBLOCK)
def utf8_error(e):
self.log.error(e)
self.reader = io.open(
self.fd,
'rb',
buffering=0,
closefd=False
)
self.writer = io.open(
self.fd,
'wt',
encoding='utf-8',
closefd=False
)
ioloop.add_handler(
self.fd, self.shell_handler, ioloop.READ | ioloop.ERROR)
def open(self, user, path):
self.fd = None
self.closed = False
if self.request.headers['Origin'] not in (
'http://%s' % self.request.headers['Host'],
'https://%s' % self.request.headers['Host']):
self.log.warning(
'Unauthorized connection attempt: from: %s to: %s' % (
self.request.headers['Origin'],
self.request.headers['Host']))
self.close()
return
self.socket = utils.Socket(self.ws_connection.stream.socket)
self.set_nodelay(True)
self.log.info('Websocket opened %r' % self.socket)
self.path = path
self.user = user if user else None
self.caller = self.callee = None
# If local we have the user connecting
if self.socket.local and self.socket.user is not None:
self.caller = self.socket.user
if tornado.options.options.unsecure:
if self.user:
try:
self.callee = utils.User(name=self.user)
except LookupError:
self.log.debug(
"Can't switch to user %s" % self.user, exc_info=True)
self.callee = None
# If no user was given and we are local, keep the same user
# as the one who opened the socket,
# i.e. the one opening a terminal in the browser
if not self.callee and not self.user and self.socket.local:
self.callee = self.caller
else:
user = utils.parse_cert(self.stream.socket.getpeercert())
assert user, 'No user in certificate'
self.user = user
try:
self.callee = utils.User(name=self.user)
except LookupError:
raise Exception('Invalid user in certificate')
TermWebSocket.terminals.add(self)
motd = (self.render_string(
tornado.options.options.motd,
butterfly=self,
version=__version__,
opts=tornado.options.options,
colors=utils.ansi_colors)
.decode('utf-8')
.replace('\r', '')
.replace('\n', '\r\n'))
self.write_message(motd)
self.pty()
def on_message(self, message):
if not hasattr(self, 'writer'):
self.on_close()
self.close()
return
if message[0] == 'R':
cols, rows = map(int, message[1:].split(','))
s = struct.pack("HHHH", rows, cols, 0, 0)
fcntl.ioctl(self.fd, termios.TIOCSWINSZ, s)
self.log.info('SIZE (%d, %d)' % (cols, rows))
elif message[0] == 'S':
self.log.debug('WRIT<%r' % message)
self.writer.write(message[1:])
self.writer.flush()
def shell_handler(self, fd, events):
if events & ioloop.READ:
try:
read = self.reader.read()
except IOError:
read = ''
self.log.debug('READ>%r' % read)
if read and self.ws_connection:
self.write_message(read.decode('utf-8', 'replace'))
else:
events = ioloop.ERROR
if events & ioloop.ERROR:
self.log.info('Error on fd %d, closing' % fd)
# Terminated
self.on_close()
self.close()
def on_close(self):
if self.closed:
return
self.closed = True
if self.fd is not None:
self.log.info('Closing fd %d' % self.fd)
if getattr(self, 'pid', 0) == 0:
self.log.info('pid is 0')
return
utils.rm_user_info(self.uid, self.pid)
try:
ioloop.remove_handler(self.fd)
except Exception:
self.log.error('handler removal fail', exc_info=True)
try:
os.close(self.fd)
except Exception:
self.log.debug('closing fd fail', exc_info=True)
try:
os.kill(self.pid, signal.SIGKILL)
os.waitpid(self.pid, 0)
except Exception:
self.log.debug('waitpid fail', exc_info=True)
TermWebSocket.terminals.remove(self)
self.log.info('Websocket closed')
if self.application.systemd and not len(TermWebSocket.terminals):
self.log.info('No more terminals, exiting...')
sys.exit(0)
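# --- Illustrative sketch (editor's addition) ---------------------------------
# ``on_message`` above implies a tiny client protocol: frames prefixed with
# 'R' carry "cols,rows" resize requests, and frames prefixed with 'S' carry
# keystrokes for the shell's stdin. A hedged client-side sketch; the URL and
# the websocket-client library are assumptions, not part of butterfly:
#
#     from websocket import create_connection  # pip install websocket-client
#     ws = create_connection('ws://localhost:57575/ws')
#     ws.send('R80,24')       # resize the pty to 80 columns x 24 rows
#     ws.send('Sls -l\n')     # type "ls -l" into the shell
#     print(ws.recv())        # terminal output arrives as text frames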
|
|
# -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs import missing as libmissing
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isna, notna, isnull, notnull,
na_value_for_dtype)
@pytest.mark.parametrize('notna_f', [notna, notnull])
def test_notna_notnull(notna_f):
assert notna_f(1.)
assert not notna_f(None)
assert not notna_f(np.NaN)
with cf.option_context("mode.use_inf_as_na", False):
assert notna_f(np.inf)
assert notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_na", True):
assert not notna_f(np.inf)
assert not notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_na", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(notna_f(s), Series))
class TestIsNA(object):
def test_0d_array(self):
assert isna(np.array(np.nan))
assert not isna(np.array(0.0))
assert not isna(np.array(0))
# test object dtype
assert isna(np.array(np.nan, dtype=object))
assert not isna(np.array(0.0, dtype=object))
assert not isna(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isna(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('isna_f', [isna, isnull])
def test_isna_isnull(self, isna_f):
assert not isna_f(1.)
assert isna_f(None)
assert isna_f(np.NaN)
assert isna_f(float('nan'))
assert not isna_f(np.inf)
assert not isna_f(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isna_f(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isna_f(df)
expected = df.apply(isna_f)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isna_f(p)
expected = p.apply(isna_f)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isna_f(p)
expected = p.apply(isna_f)
tm.assert_panel4d_equal(result, expected)
def test_isna_lists(self):
result = isna([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isna([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isna(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isna([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isna_nat(self):
result = isna([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isna(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isna_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isna(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isna_datetime(self):
assert not isna(datetime.now())
assert notna(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notna(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isna(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isna(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isna(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
@pytest.mark.parametrize(
"value, expected",
[(np.complex128(np.nan), True),
(np.float64(1), False),
(np.array([1, 1 + 0j, np.nan, 3]),
np.array([False, False, True, False])),
(np.array([1, 1 + 0j, np.nan, 3], dtype=object),
np.array([False, False, True, False])),
(np.array([1, 1 + 0j, np.nan, 3]).astype(object),
np.array([False, False, True, False]))])
def test_complex(self, value, expected):
result = isna(value)
if is_scalar(result):
assert result is expected
else:
tm.assert_numpy_array_equal(result, expected)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'),
DatetimeIndex([0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex(
[1, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
assert (array_equivalent(m, n, strict_nan=True))
assert (array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (4, 3)], dtype=[('a', int), ('b', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('b', int), ('a', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
def test_array_equivalent_str():
for dtype in ['O', 'S', 'U']:
assert array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'B'], dtype=dtype))
assert not array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'X'], dtype=dtype))
def test_na_value_for_dtype():
for dtype in [np.dtype('M8[ns]'), np.dtype('m8[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]')]:
assert na_value_for_dtype(dtype) is NaT
for dtype in ['u1', 'u2', 'u4', 'u8',
'i1', 'i2', 'i4', 'i8']:
assert na_value_for_dtype(np.dtype(dtype)) == 0
for dtype in ['bool']:
assert na_value_for_dtype(np.dtype(dtype)) is False
for dtype in ['f2', 'f4', 'f8']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
for dtype in ['O']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
class TestNAObj(object):
_1d_methods = ['isnaobj', 'isnaobj_old']
_2d_methods = ['isnaobj2d', 'isnaobj2d_old']
def _check_behavior(self, arr, expected):
for method in TestNAObj._1d_methods:
result = getattr(libmissing, method)(arr)
tm.assert_numpy_array_equal(result, expected)
arr = np.atleast_2d(arr)
expected = np.atleast_2d(expected)
for method in TestNAObj._2d_methods:
result = getattr(libmissing, method)(arr)
tm.assert_numpy_array_equal(result, expected)
def test_basic(self):
arr = np.array([1, None, 'foo', -5.1, pd.NaT, np.nan])
expected = np.array([False, True, False, False, True, True])
self._check_behavior(arr, expected)
def test_non_obj_dtype(self):
arr = np.array([1, 3, np.nan, 5], dtype=float)
expected = np.array([False, False, True, False])
self._check_behavior(arr, expected)
def test_empty_arr(self):
arr = np.array([])
expected = np.array([], dtype=bool)
self._check_behavior(arr, expected)
def test_empty_str_inp(self):
arr = np.array([""]) # empty but not na
expected = np.array([False])
self._check_behavior(arr, expected)
def test_empty_like(self):
# see gh-13717: no segfaults!
arr = np.empty_like([None])
expected = np.array([True])
self._check_behavior(arr, expected)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from singa import autograd
from singa import tensor
from singa import device
from singa import opt
import numpy as np
from tqdm import trange
# the code is modified from
# https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
class Block(autograd.Layer):
def __init__(self,
in_filters,
out_filters,
reps,
strides=1,
padding=0,
start_with_relu=True,
grow_first=True):
super(Block, self).__init__()
if out_filters != in_filters or strides != 1:
self.skip = autograd.Conv2d(in_filters,
out_filters,
1,
stride=strides,
padding=padding,
bias=False)
self.skipbn = autograd.BatchNorm2d(out_filters)
else:
self.skip = None
self.layers = []
filters = in_filters
if grow_first:
self.layers.append(autograd.ReLU())
self.layers.append(
autograd.SeparableConv2d(in_filters,
out_filters,
3,
stride=1,
padding=1,
bias=False))
self.layers.append(autograd.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps - 1):
self.layers.append(autograd.ReLU())
self.layers.append(
autograd.SeparableConv2d(filters,
filters,
3,
stride=1,
padding=1,
bias=False))
self.layers.append(autograd.BatchNorm2d(filters))
if not grow_first:
self.layers.append(autograd.ReLU())
self.layers.append(
autograd.SeparableConv2d(in_filters,
out_filters,
3,
stride=1,
padding=1,
bias=False))
self.layers.append(autograd.BatchNorm2d(out_filters))
if not start_with_relu:
self.layers = self.layers[1:]
else:
self.layers[0] = autograd.ReLU()
if strides != 1:
self.layers.append(autograd.MaxPool2d(3, strides, padding + 1))
def __call__(self, x):
y = self.layers[0](x)
for layer in self.layers[1:]:
if isinstance(y, tuple):
y = y[0]
y = layer(y)
if self.skip is not None:
skip = self.skip(x)
skip = self.skipbn(skip)
else:
skip = x
y = autograd.add(y, skip)
return y
__all__ = ['Xception']
class Xception(autograd.Layer):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self, num_classes=1000):
""" Constructor
Args:
num_classes: number of classes
"""
super(Xception, self).__init__()
self.num_classes = num_classes
self.conv1 = autograd.Conv2d(3, 32, 3, 2, 0, bias=False)
self.bn1 = autograd.BatchNorm2d(32)
self.conv2 = autograd.Conv2d(32, 64, 3, 1, 1, bias=False)
self.bn2 = autograd.BatchNorm2d(64)
# do relu here
self.block1 = Block(64,
128,
2,
2,
padding=0,
start_with_relu=False,
grow_first=True)
self.block2 = Block(128,
256,
2,
2,
padding=0,
start_with_relu=True,
grow_first=True)
self.block3 = Block(256,
728,
2,
2,
padding=0,
start_with_relu=True,
grow_first=True)
self.block4 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block5 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block6 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block7 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block8 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block9 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block10 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block11 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block12 = Block(728,
1024,
2,
2,
start_with_relu=True,
grow_first=False)
self.conv3 = autograd.SeparableConv2d(1024, 1536, 3, 1, 1)
self.bn3 = autograd.BatchNorm2d(1536)
# do relu here
self.conv4 = autograd.SeparableConv2d(1536, 2048, 3, 1, 1)
self.bn4 = autograd.BatchNorm2d(2048)
self.globalpooling = autograd.MaxPool2d(10, 1)
self.fc = autograd.Linear(2048, num_classes)
def features(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = autograd.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = autograd.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
x = autograd.relu(x)
x = self.conv4(x)
x = self.bn4(x)
return x
def logits(self, features):
x = autograd.relu(features)
x = self.globalpooling(x)
x = autograd.flatten(x)
x = self.fc(x)
return x
def __call__(self, input):
x = self.features(input)
x = self.logits(x)
return x
if __name__ == '__main__':
model = Xception(num_classes=1000)
    print('Start initialization............')
dev = device.create_cuda_gpu_on(0)
#dev = device.create_cuda_gpu()
niters = 20
batch_size = 16
IMG_SIZE = 299
sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
tx = tensor.Tensor((batch_size, 3, IMG_SIZE, IMG_SIZE), dev)
ty = tensor.Tensor((batch_size,), dev, tensor.int32)
autograd.training = True
x = np.random.randn(batch_size, 3, IMG_SIZE, IMG_SIZE).astype(np.float32)
y = np.random.randint(0, 1000, batch_size, dtype=np.int32)
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
with trange(niters) as t:
for _ in t:
x = model(tx)
loss = autograd.softmax_cross_entropy(x, ty)
for p, g in autograd.backward(loss):
sgd.update(p, g)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import mock
from cinder import context
from cinder import exception
from cinder.scheduler import filter_scheduler
from cinder.scheduler import host_manager
from cinder.tests.scheduler import fakes
from cinder.tests.scheduler import test_scheduler
from cinder.volume import utils
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
def test_create_consistencygroup_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
request_spec = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type1',
'extra_specs': {}}}
request_spec2 = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type2',
'extra_specs': {}}}
request_spec_list = [request_spec, request_spec2]
self.assertRaises(exception.NoValidHost,
sched.schedule_create_consistencygroup,
fake_context, 'faki-id1', request_spec_list, {})
@mock.patch('cinder.db.service_get_all_by_topic')
def test_schedule_consistencygroup(self,
_mock_service_get_all_by_topic):
# Make sure _schedule_group() can find host successfully.
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
specs = {'capabilities:consistencygroup_support': '<is> True'}
request_spec = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type1',
'extra_specs': specs}}
request_spec2 = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type2',
'extra_specs': specs}}
request_spec_list = [request_spec, request_spec2]
weighed_host = sched._schedule_group(fake_context,
request_spec_list,
{})
self.assertIsNotNone(weighed_host.obj)
self.assertTrue(_mock_service_get_all_by_topic.called)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_schedule_consistencygroup_no_cg_support_in_extra_specs(
self,
_mock_service_get_all_by_topic):
# Make sure _schedule_group() can find host successfully even
# when consistencygroup_support is not specified in volume type's
# extra specs
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
request_spec = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type1',
'extra_specs': {}}}
request_spec2 = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type2',
'extra_specs': {}}}
request_spec_list = [request_spec, request_spec2]
weighed_host = sched._schedule_group(fake_context,
request_spec_list,
{})
self.assertIsNotNone(weighed_host.obj)
self.assertTrue(_mock_service_get_all_by_topic.called)
def test_create_volume_no_hosts(self):
# Ensure empty hosts/child_zones result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
request_spec = {'volume_properties': {'project_id': 1,
'size': 1},
'volume_type': {'name': 'LVM_iSCSI'},
'volume_id': ['fake-id1']}
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume,
fake_context, request_spec, {})
@mock.patch('cinder.scheduler.host_manager.HostManager.'
'get_all_host_states')
def test_create_volume_non_admin(self, _mock_get_all_host_states):
# Test creating a volume locally using create_volume, passing
# a non-admin context. DB actions should work.
self.was_admin = False
def fake_get(ctxt):
# Make sure this is called with admin context, even though
# we're using user context below.
self.was_admin = ctxt.is_admin
return {}
sched = fakes.FakeFilterScheduler()
_mock_get_all_host_states.side_effect = fake_get
fake_context = context.RequestContext('user', 'project')
request_spec = {'volume_properties': {'project_id': 1,
'size': 1},
'volume_type': {'name': 'LVM_iSCSI'},
'volume_id': ['fake-id1']}
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume,
fake_context, request_spec, {})
self.assertTrue(self.was_admin)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_schedule_happy_day(self, _mock_service_get_all_by_topic):
# Make sure there's nothing glaringly wrong with _schedule()
# by doing a happy day pass through.
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
weighed_host = sched._schedule(fake_context, request_spec, {})
self.assertIsNotNone(weighed_host.obj)
self.assertTrue(_mock_service_get_all_by_topic.called)
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)
sched = fakes.FakeFilterScheduler()
self.assertEqual(4, sched._max_attempts())
def test_invalid_max_attempts(self):
self.flags(scheduler_max_attempts=0)
self.assertRaises(exception.InvalidParameterValue,
fakes.FakeFilterScheduler)
def test_retry_disabled(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
filter_properties = {}
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# Should not have retry info in the populated filter properties.
self.assertNotIn("retry", filter_properties)
def test_retry_attempt_one(self):
# Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
filter_properties = {}
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
# Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
# Test for necessary explosion when max retries is exceeded.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
request_spec, filter_properties=filter_properties)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
sched = fakes.FakeFilterScheduler()
sched._add_retry_host(filter_properties, host)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual(host, hosts[0])
def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
host_state = host_manager.HostState('host')
host_state.total_capacity_gb = 1024
sched._post_select_populate_filter_properties(filter_properties,
host_state)
self.assertEqual('host',
filter_properties['retry']['hosts'][0])
self.assertEqual(1024, host_state.total_capacity_gb)
def _host_passes_filters_setup(self, mock_obj):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
fakes.mock_host_manager_db_calls(mock_obj)
return (sched, fake_context)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_host_passes_filters_happy_day(self, _mock_service_get_topic):
"""Do a successful pass through of with host_passes_filters()."""
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
ret_host = sched.host_passes_filters(ctx, 'host1#lvm1',
request_spec, {})
self.assertEqual(utils.extract_host(ret_host.host), 'host1')
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_host_passes_filters_default_pool_happy_day(
self, _mock_service_get_topic):
"""Do a successful pass through of with host_passes_filters()."""
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
ret_host = sched.host_passes_filters(ctx, 'host5#_pool0',
request_spec, {})
self.assertEqual(utils.extract_host(ret_host.host), 'host5')
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_host_passes_filters_no_capacity(self, _mock_service_get_topic):
"""Fail the host due to insufficient capacity."""
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1024}}
self.assertRaises(exception.NoValidHost,
sched.host_passes_filters,
ctx, 'host1#lvm1', request_spec, {})
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic):
# Retype should pass if current host passes filters and
# policy=never. host4 doesn't have enough space to hold an additional
# 200GB, but it is already the host of this volume and should not be
# counted twice.
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm4'}
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI',
'extra_specs': extra_specs},
'volume_properties': {'project_id': 1,
'size': 200,
'host': 'host4#lvm4'}}
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='never')
self.assertEqual(utils.extract_host(host_state.host), 'host4')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_with_pool_policy_never_migrate_pass(
self, _mock_service_get_topic):
# Retype should pass if current host passes filters and
# policy=never. host4 doesn't have enough space to hold an additional
# 200GB, but it is already the host of this volume and should not be
# counted twice.
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm3'}
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI',
'extra_specs': extra_specs},
'volume_properties': {'project_id': 1,
'size': 200,
'host': 'host3#lvm3'}}
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='never')
self.assertEqual(host_state.host, 'host3#lvm3')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic):
# Retype should fail if current host doesn't pass filters and
# policy=never.
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI',
'extra_specs': extra_specs},
'volume_properties': {'project_id': 1,
'size': 200,
'host': 'host4'}}
self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx,
request_spec, filter_properties={},
migration_policy='never')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic):
# Retype should pass if current host fails filters but another host
# is suitable when policy=on-demand.
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI',
'extra_specs': extra_specs},
'volume_properties': {'project_id': 1,
'size': 200,
'host': 'host4'}}
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='on-demand')
self.assertEqual(utils.extract_host(host_state.host), 'host1')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic):
# Retype should fail if current host doesn't pass filters and
# no other suitable candidates exist even if policy=on-demand.
sched, ctx = self._host_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': 1,
'volume_type': {'name': 'LVM_iSCSI',
'extra_specs': extra_specs},
'volume_properties': {'project_id': 1,
'size': 2048,
'host': 'host4'}}
self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx,
request_spec, filter_properties={},
migration_policy='on-demand')
|
|
import logging
from typing import Optional
from . import context
from .lexer import Lexer
from .utils import log
logger = logging.getLogger('parser').print_pos
tree_logger = logging.getLogger('tree').print_pos
class Token(Lexer):
def __init__(self, *args, **kwargs):
super(Token, self).__init__(*args, **kwargs)
self.is_safe = False
def __eq__(self, other):
return self.cur_token == other
def __ne__(self, other):
return not self.__eq__(other)
def safe(self):
self.is_safe = True
return self
def __rshift__(self, other):
        matched = (self.cur_token in other
                   if isinstance(other, (tuple, list))
                   else self.cur_token == other)
        if matched:
self.is_safe = False
val = self.cur_value
self.next()
return val
else:
is_fatal = not self.is_safe
self.is_safe = False
logger.current_token.log(logging.FATAL if is_fatal else logging.WARNING,
'Expected token (%s), current token (`%s`, "%s")',
', '.join(
'`{}`'.format(t)
for t in (other
if isinstance(other, (tuple, list)) else
[other])
),
self.cur_token, self.cur_raw_value)
if is_fatal:
exit(1)
raise context.ParserException()
else:
return None
class Parser(object):
def __init__(self, token: Token):
self.token = token
def parse(self):
self.program()
@log(tree_logger)
def program(self):
# <program_element_list>
self.program_element_list()
@log(tree_logger)
def program_element_list(self):
# <program_element>+
self.program_element()
while self.token != 'END_CHAR':
self.program_element()
@log(tree_logger)
def program_element(self):
# <table_declaration>
# <procedure_declaration>
# <sequence_declaration>
self.token >> 'CREATE'
if self.token == 'TABLE':
self.table_declaration()
elif self.token == 'PROCEDURE':
self.procedure_declaration()
elif self.token == 'SEQUENCE':
self.sequence_declaration()
else:
            self.token >> ('TABLE', 'PROCEDURE', 'SEQUENCE')  # Report the expected-token error
raise context.UnreachableException()
@log(tree_logger)
def table_declaration(self):
# CREATE TABLE id LPARENT <column_declaration_list> RPARENT SEMICOLON
self.token.next() # self.token >> 'TABLE'
table_name = self.token >> 'IDENTIFIER'
table = context.Table(table_name)
check_name = context.VARIABLES.get(table_name)
if check_name:
context.table_logger.error('Table name "%s" is already used for the %s.', table_name, check_name)
else:
context.VARIABLES[table_name] = 'table'
context.TABLES[table_name] = table
self.token >> 'LPARENT'
self.column_declaration_list(table)
self.token >> 'RPARENT'
self.token.safe() >> 'SEMICOLON'
@log(tree_logger)
def column_declaration_list(self, table: context.Table):
# column_declaration_list (COMMA column_declaration)*
self.column_declaration(table)
while self.token == 'COMMA':
self.token.next() # self.token >> 'COMMA'
self.column_declaration(table)
@log(tree_logger)
def column_declaration(self, table: context.Table):
# id TYPE
# id TYPE PK
# id TYPE PK FLOAT
# id TYPE INDEXED
# id TYPE INDEXED FLOAT
# id TYPE BLOOM
# id TYPE BLOOM FLOAT
column_name = self.token >> 'IDENTIFIER'
column_type = self.parse_type()
if self.token in ('PK', 'INDEXED', 'BLOOM'):
mod = self.token >> ('PK', 'INDEXED', 'BLOOM')
value = self.token >> 'FLOAT' if self.token == 'FLOAT' else None
column = context.Column.auto(column_name, column_type, mod, table, value)
else:
column = context.Column.common(column_name, column_type, table)
table.add_column(column)
@log(tree_logger)
def parse_type(self):
kind = self.token >> 'TYPE'
if self.token == 'LPARENT':
self.token.next() # self.token >> 'LPARENT'
size = self.token >> 'INTEGER'
self.token >> 'RPARENT'
return context.get_type_by_name(kind, size)
return context.get_type_by_name(kind)
@log(tree_logger)
def procedure_declaration(self):
# CREATE PROCEDURE id LPARENT <parameter_declaration_list> RPARENT BEGIN <statement_list> END SEMICOLON
self.token.next() # self.token >> 'PROCEDURE'
procedure_name = self.token >> 'IDENTIFIER'
check_name = context.VARIABLES.get(procedure_name)
procedure = context.Procedure(procedure_name)
if check_name:
context.procedure_logger.error('Procedure name "%s" is already used for the %s.',
procedure_name, check_name)
else:
context.VARIABLES[procedure_name] = 'procedure'
context.PROCEDURES[procedure_name] = procedure
self.token >> 'LPARENT'
self.parameter_declaration_list(procedure)
self.token >> 'RPARENT'
self.token.safe() >> 'BEGIN'
self.statement_list(procedure)
self.token.safe() >> 'END'
self.token.safe() >> 'SEMICOLON'
@log(tree_logger)
def parameter_declaration_list(self, procedure: context.Procedure):
# <parameter_declaration> (COMMA <parameter_declaration>)*
self.parameter_declaration(procedure)
while self.token == 'COMMA':
self.token.next() # self.token >> 'COMMA'
self.parameter_declaration(procedure)
@log(tree_logger)
def parameter_declaration(self, procedure: context.Procedure):
# pid TYPE IN
# pid TYPE OUT
parameter_name = self.token >> 'PARAMETER'
parameter_type = self.parse_type()
if self.token == 'IN':
self.token.next() # self.token >> 'IN'
parameter = context.InputParameter(parameter_name, parameter_type)
elif self.token == 'OUT':
self.token.next() # self.token >> 'OUT'
parameter = context.OutputParameter(parameter_name, parameter_type)
else:
            context.parameter_logger.error('%s Parameter direction (`in` or `out`) not found', procedure)
parameter = context.Parameter(parameter_name, parameter_type)
procedure.add_parameter(parameter)
@log(tree_logger)
def statement_list(self, procedure: context.Procedure):
# <statement>+
self.statement(procedure)
while self.token == 'INSERT' or self.token == 'SELECT':
self.statement(procedure)
@log(tree_logger)
def statement(self, procedure: context.Procedure):
# <insert_statement>
# <select_statement>
if self.token == 'INSERT':
if procedure.mode is None:
procedure.set_mode_to_write()
self.insert_statement(procedure)
elif self.token == 'SELECT':
if procedure.mode is None:
procedure.set_mode_to_read()
self.select_statement(procedure)
else:
raise context.UnreachableException()
@log(tree_logger)
def insert_statement(self, procedure: context.Procedure):
# INSERT TABLE id VALUES LPARENT <argument_list> RPARENT SEMICOLON
self.token.next() # self.token >> 'INSERT'
self.token >> 'TABLE'
table_name = self.token >> 'IDENTIFIER'
table = context.TABLES.get(table_name)
# Check exists table
if not table:
context.statement_insert_logger.error('%s Table "%s" not found', procedure, table_name)
insert_statement = context.InsertStatement(procedure, table)
self.token >> 'VALUES'
self.token >> 'LPARENT'
self.argument_list(procedure, insert_statement)
# Check num columns and num arguments
if table and len(table.columns) != len(insert_statement.arguments):
context.statement_insert_logger.error('%s Incorrect number of arguments', procedure)
self.token >> 'RPARENT'
self.token >> 'SEMICOLON'
procedure.add_statement(insert_statement)
@log(tree_logger)
def argument_list(self, procedure: context.Procedure, insert_statement: context.InsertStatement):
# <argument> (COMMA <argument>)*
self.argument(procedure, insert_statement)
while self.token == 'COMMA':
self.token.next() # self.token >> 'COMMA'
self.argument(procedure, insert_statement)
@log(tree_logger)
def argument(self, procedure: context.Procedure, insert_statement: context.InsertStatement):
# pid
# CURRVAL LPARENT id RPARENT
# NEXTVAL LPARENT id RPARENT
if self.token == 'CURRVAL':
self.token.next() # self.token >> 'CURRVAL'
self.token >> 'LPARENT'
sequence_name = self.token >> 'IDENTIFIER'
sequence = context.SEQUENCES.get(sequence_name)
if not sequence:
context.statement_insert_logger.error('%s Sequence "%s" not found', procedure, sequence_name)
argument = context.ArgumentSequenceCurrent(sequence_name, sequence)
self.token >> 'RPARENT'
insert_statement.arguments.append(argument)
elif self.token == 'NEXTVAL':
self.token.next() # self.token >> 'NEXTVAL'
self.token >> 'LPARENT'
sequence_name = self.token >> 'IDENTIFIER'
sequence = context.SEQUENCES.get(sequence_name)
if not sequence:
context.statement_insert_logger.error('%s Sequence "%s" not found', procedure, sequence_name)
argument = context.ArgumentSequenceNext(sequence_name, sequence)
self.token >> 'RPARENT'
insert_statement.arguments.append(argument)
elif self.token == 'PARAMETER':
parameter_name = self.token >> 'PARAMETER'
parameter = procedure.parameters.get(parameter_name)
if not parameter:
context.statement_insert_logger.error('%s Parameter "%s" not found in procedure parameters',
procedure,
parameter_name)
elif not isinstance(parameter, context.InputParameter):
context.statement_insert_logger.error('%s The parameter "%s" must be input', procedure, parameter_name)
argument = context.ArgumentParameter(parameter_name, parameter)
insert_statement.arguments.append(argument)
else:
logger.error('Expected `currval` or `nextval` or <parameter>')
@log(tree_logger)
def select_statement(self, procedure: context.Procedure):
# SELECT <selection_list> FROM <table_list> WHERE <condition_list> SEMICOLON
self.token.next() # self.token >> 'SELECT'
select_statement = context.SelectStatement(procedure)
self.selection_list(procedure, select_statement)
self.token >> 'FROM'
self.table_list(procedure, select_statement)
select_statement.check_selections()
self.token >> 'WHERE'
raw_condition_tree = self.condition_list(procedure, select_statement)
_, _, condition_tree = self.parse_tree_node(raw_condition_tree)
select_statement.condition_tree = condition_tree
self.token.safe() >> 'SEMICOLON'
procedure.add_statement(select_statement)
@staticmethod
def parse_tree_node(tree):
if isinstance(tree, (tuple, list)):
lop, *children = tree # lop = logic operator
if lop in ('AND', 'OR'):
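                # Flatten nested nodes that share this operator, e.g. a raw
                # ('AND', c1, ('AND', c2, c3)) collapses into a single
                # ConditionTreeNodeAnd over [c1, c2, c3].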
new_children = [
el
for child in children
for child_lop, child_children, new_child in [Parser.parse_tree_node(child)]
for children in [child_children if child_lop == lop else [new_child]]
for el in children
]
node = (context.ConditionTreeNodeOr(new_children)
if lop == 'OR' else
context.ConditionTreeNodeAnd(new_children))
elif lop == 'NOT':
assert len(children) == 1
_, _, node = Parser.parse_tree_node(*children)
node.set_not()
new_children = [node]
else:
raise context.UnreachableException()
return lop, new_children, node
else:
return None, [tree], tree
@log(tree_logger)
def selection_list(self, procedure: context.Procedure, select_statement: context.SelectStatement):
# <selection> (COMMA <selection>)*
self.selection(procedure, select_statement)
while self.token == 'COMMA':
self.token.next() # self.token >> 'COMMA'
self.selection(procedure, select_statement)
@log(tree_logger)
def selection(self, procedure: context.Procedure, select_statement: context.SelectStatement):
# id SET pid
column_name = self.token >> 'IDENTIFIER'
self.token >> 'SET'
parameter_name = self.token >> 'PARAMETER'
parameter = procedure.parameters.get(parameter_name)
if not parameter:
context.statement_select_logger.error('%s Parameter %s not found in procedure parameters',
procedure,
parameter_name)
elif not isinstance(parameter, context.OutputParameter):
context.statement_select_logger.error('%s The parameter %s must be output',
procedure,
parameter_name)
select_statement.raw_selections.append((column_name, parameter))
@log(tree_logger)
def table_list(self, procedure: context.Procedure, select_statement: context.SelectStatement):
# id (JOIN id)*
table_name = self.token >> 'IDENTIFIER'
table = context.TABLES.get(table_name)
if not table:
context.statement_select_logger.error('%s Table %s not found', procedure, table_name)
else:
select_statement.add_table(table)
while self.token == 'JOIN':
self.token.next() # self.token >> 'JOIN'
table_name = self.token >> 'IDENTIFIER'
table = context.TABLES.get(table_name)
if not table:
context.statement_select_logger.error('%s Table %s not found', procedure, table_name)
else:
select_statement.add_table(table)
@log(tree_logger)
def condition_list(self, procedure: context.Procedure, select_statement: context.Statement):
# <condition_simple> (OR|AND <condition_simple>)*
cond = self.condition_simple(procedure, select_statement)
interim_out = [[cond]]
while True:
if self.token == 'AND':
self.token.next() # self.token >> 'AND'
interim_out[-1].append(self.condition_simple(procedure, select_statement))
elif self.token == 'OR':
self.token.next() # self.token >> 'OR'
interim_out.append([self.condition_simple(procedure, select_statement)])
else:
break
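        # AND binds tighter than OR: interim_out holds one list per OR-branch,
        # e.g. `a AND b OR c` yields [[a, b], [c]], which becomes
        # ['OR', ('AND', a, b), c].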
out = (
['OR', *map(lambda x: x[0] if len(x) == 1 else ('AND', *x), interim_out)]
if len(interim_out) > 1 else
['AND', *interim_out[0]]
if len(interim_out[0]) > 1 else
interim_out[0][0]
)
return out
@log(tree_logger)
def condition_simple(self, procedure: context.Procedure, select_statement: context.Statement):
# LPARENT <condition_list> RPARENT
# NOT <condition_simple>
# id <op> id
# id <op> pid
if self.token == 'LPARENT':
self.token.next() # self.token >> 'LPARENT'
cond = self.condition_list(procedure, select_statement)
self.token >> 'RPARENT'
elif self.token == 'NOT':
self.token.next() # self.token >> 'NOT'
cond = ['NOT', self.condition_simple(procedure, select_statement)]
else:
left = self.token >> 'IDENTIFIER'
left_column: Optional[context.Column] = select_statement.find_column(left)
op = self.operator()
if self.token == 'IDENTIFIER':
right = self.token >> 'IDENTIFIER'
right_column: Optional[context.Column] = select_statement.find_column(right)
if left_column and right_column and left_column.kind != right_column.kind:
context.condition_logger.error(
'%s Incompatible types column %s and column %s',
procedure,
left_column.name, right_column.name
)
cond = context.ConditionWithOnlyColumns(left, right, op, left_column, right_column)
elif self.token == 'PARAMETER':
right = self.token >> 'PARAMETER'
right_parameter: Optional[context.Parameter] = procedure.parameters.get(right)
if not right_parameter:
context.condition_logger.error('%s Parameter %s not found in procedure parameters',
procedure,
right)
elif not isinstance(right_parameter, context.InputParameter):
context.condition_logger.error('The parameter %s must be input', right)
if right_parameter and left_column and left_column.kind != right_parameter.kind:
context.condition_logger.error(
'%s Incompatible types parameter %s and column %s',
procedure,
left_column.name, right_parameter.name
)
cond = context.ConditionWithParameter(left, right, op, left_column, right_parameter)
else:
self.token >> ('IDENTIFIER', 'PARAMETER')
raise context.UnreachableException()
select_statement.add_condition_to_table(cond)
return cond
@log(tree_logger)
def operator(self):
# EQ
# LESS
# MORE
# NOT_EQ
# LESS_OR_EQ
# MORE_OR_EQ
return self.token >> ('EQ', 'LESS', 'MORE', 'NOT_EQ', 'LESS_OR_EQ', 'MORE_OR_EQ')
@log(tree_logger)
def sequence_declaration(self):
# CREATE SEQUENCE id SEMICOLON
self.token >> 'SEQUENCE'
sequence_name = self.token >> 'IDENTIFIER'
sequence = context.Sequence(sequence_name)
check_name = context.VARIABLES.get(sequence_name)
if check_name:
context.sequence_logger.error('Sequence name `%s` is already used for the %s', sequence_name, check_name)
else:
context.SEQUENCES[sequence_name] = sequence
context.VARIABLES[sequence_name] = 'sequence'
self.token >> 'SEMICOLON'
|
|
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
#
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from scipy.lib.six import xrange
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp']
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator polynomials.
The denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
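    Examples
    --------
    A minimal sketch converting ``H(s) = 1 / (s + 2)`` to controller
    canonical state-space form:
    >>> from scipy import signal
    >>> A, B, C, D = signal.tf2ss([1.0], [1.0, 2.0])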
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return array([], float), array([], float), array([], float), \
array([], float)
    # pad numerator to have same number of columns as denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:, 0]
else:
D = array([], float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - num[:, 0] * den[1:]
return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
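    Examples
    --------
    A minimal sketch: with `A`, `B` and `C` given, the missing `D` is
    rebuilt as a zero matrix of the compatible shape:
    >>> from scipy import signal
    >>> A, B, C, D = signal.abcd_normalize(A=[[0, 1], [-1, -2]],
    ...                                    B=[[0], [1]], C=[[1, 0]])
    >>> D.shape
    (1, 1)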
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
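    Examples
    --------
    A minimal sketch recovering ``H(s) = 1 / (s + 2)`` from its state-space
    matrices:
    >>> from scipy import signal
    >>> num, den = signal.ss2tf([[-2.0]], [[1.0]], [[1.0]], [[0.0]])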
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:, input]
B.shape = (B.shape[0], 1)
if D.shape[-1] != 0:
D = D[:, input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
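    Examples
    --------
    A minimal sketch for a unit-gain system with poles at -1 and -2 and no
    zeros:
    >>> from scipy import signal
    >>> A, B, C, D = signal.zpk2ss([], [-1.0, -2.0], 1.0)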
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
Parameters
----------
args : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of elements in the tuple and the
interpretation:
* 2: (numerator, denominator)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
Each argument can be an array or sequence.
Notes
-----
`lti` instances have all types of representations available; for example
after creating an instance s with ``(zeros, poles, gain)`` the transfer
function representation (numerator, denominator) can be accessed as
``s.num`` and ``s.den``.
"""
def __init__(self, *args, **kwords):
"""
Initialize the LTI system using either:
- (numerator, denominator)
- (zeros, poles, gain)
- (A, B, C, D) : state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self._num, self._den = normalize(*args)
self._update(N)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self._zeros, self._poles, self._gain = args
self._update(N)
# make sure we have numpy arrays
self.zeros = numpy.asarray(self.zeros)
self.poles = numpy.asarray(self.poles)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self._A, self._B, self._C, self._D = abcd_normalize(*args)
self._update(N)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __repr__(self):
"""
Canonical representation using state-space to preserve numerical
precision and any MIMO information
"""
return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
)
@property
def num(self):
return self._num
@num.setter
def num(self, value):
self._num = value
self._update(2)
@property
def den(self):
return self._den
@den.setter
def den(self, value):
self._den = value
self._update(2)
@property
def zeros(self):
return self._zeros
@zeros.setter
def zeros(self, value):
self._zeros = value
self._update(3)
@property
def poles(self):
return self._poles
@poles.setter
def poles(self, value):
self._poles = value
self._update(3)
@property
def gain(self):
return self._gain
@gain.setter
def gain(self, value):
self._gain = value
self._update(3)
@property
def A(self):
return self._A
@A.setter
def A(self, value):
self._A = value
self._update(4)
@property
def B(self):
return self._B
@B.setter
def B(self, value):
self._B = value
self._update(4)
@property
def C(self):
return self._C
@C.setter
def C(self, value):
self._C = value
self._update(4)
@property
def D(self):
return self._D
@D.setter
def D(self, value):
self._D = value
self._update(4)
def _update(self, N):
if N == 2:
self._zeros, self._poles, self._gain = tf2zpk(self.num, self.den)
self._A, self._B, self._C, self._D = tf2ss(self.num, self.den)
if N == 3:
self._num, self._den = zpk2tf(self.zeros, self.poles, self.gain)
self._A, self._B, self._C, self._D = zpk2ss(self.zeros,
self.poles, self.gain)
if N == 4:
self._num, self._den = ss2tf(self.A, self.B, self.C, self.D)
self._zeros, self._poles, self._gain = ss2zpk(self.A, self.B,
self.C, self.D)
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See scipy.signal.bode for details.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = s1.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See scipy.signal.freqresp for details.
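        Examples
        --------
        A minimal sketch:
        >>> from scipy import signal
        >>> s1 = signal.lti([1], [1, 1])
        >>> w, H = s1.freqresp()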
"""
return freqresp(self, w=w, n=n)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
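    Examples
    --------
    A minimal sketch: the zero-input response of ``dx/dt = -x`` from
    ``x(0) = 1``, whose output decays as ``exp(-t)``:
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 5, 101)
    >>> tout, yout, xout = signal.lsim2(([1.0], [1.0, 1.0]), T=t, X0=[1.0])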
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
    X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
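    Examples
    --------
    A minimal sketch: the unit-step response of ``H(s) = 1 / (s + 1)``:
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 5, 101)
    >>> u = np.ones_like(t)
    >>> tout, yout, xout = signal.lsim(([1.0], [1.0, 1.0]), u, t)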
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0], 1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T), sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1] - T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt)
GT = _cast_to_array_dtype(GT, xout)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1, ATm1)
I = eye(A.shape[0], dtype=A.dtype)
GTmI = GT - I
F1T = dot(dot(BT, GTmI), ATm1)
if interp:
F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)
for k in xrange(1, len(T)):
dt1 = T[k] - T[k - 1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt)
GT = _cast_to_array_dtype(GT, xout)
GTmI = GT - I
F1T = dot(dot(BT, GTmI), ATm1)
if interp:
F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)
xout[k] = dot(xout[k - 1], GT) + dot(U[k - 1], F1T)
if interp:
xout[k] = xout[k] + dot((U[k] - U[k - 1]), F2T)
yout = (squeeze(dot(U, transpose(sys.D))) +
squeeze(dot(xout, transpose(sys.C))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
h = zeros(T.shape, sys.A.dtype)
s, v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s * T[k]))
eA = dot(dot(v, es), vi)
eA = _cast_to_array_dtype(eA, h)
h[k] = squeeze(dot(dot(C, eA), B))
return T, h
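# A hedged usage sketch (not from the original source): for H(s) = 1/(s + 1)
# the impulse response is exp(-t); `impulse` reproduces it to floating-point
# accuracy because the matrix exponential is exact for this system.
def _demo_impulse():
    import numpy as np
    t, y = impulse(([1.0], [1.0, 1.0]))
    assert np.allclose(y, np.exp(-t))
    return t, y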
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
    N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
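# A hedged usage sketch (not from the original source): for H(s) = 1/(s + 1)
# the unit-step response is 1 - exp(-t); `step` reproduces it because the
# lsim update is exact for a piecewise-constant input.
def _demo_step():
    import numpy as np
    t, y = step(([1.0], [1.0, 1.0]))
    assert np.allclose(y, 1.0 - np.exp(-t))
    return t, y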
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
    N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
        Additional keyword arguments are passed on to the function
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`. See the documentation for
        `scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
.. versionadded:: 0.11.0
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
    w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data are
        calculated for every value in this array. If not given, a
        reasonable set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = signal.bode(s1)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
    w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data are
        calculated for every value in this array. If not given, a
        reasonable set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Examples
--------
# Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([], [1, 1, 1], [5])
# transfer function: H(s) = 5 / (s-1)^3
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
|
|
#!/usr/bin/env python
"""
m2g.stats.qa_reg.py
~~~~~~~~~~~~~~~~~~~~~~
The qa for registration step in the pipeline.
In this module, m2g compares the registered and reference brain data to show the quality of each registration by plotting 9 overlay brain slices.
"""
import warnings
warnings.simplefilter("ignore")
import os
import re
import sys
import numpy as np
import nibabel as nb
from m2g.utils.gen_utils import get_braindata, get_filename
from m2g.utils.qa_utils import get_min_max, opaque_colorscale, pad_im
from argparse import ArgumentParser
from scipy import ndimage
from matplotlib.colors import LinearSegmentedColormap
import matplotlib as mpl
mpl.use("Agg") # very important above pyplot import
from nilearn.plotting.edge_detect import _edge_map as edge_map
import matplotlib.pyplot as plt
def reg_mri_pngs(
mri, atlas, outdir, loc=0, mean=False, minthr=2, maxthr=95, edge=False
):
"""
A function to create and save registered brain slice figures.
    Parameters
    ----------
    mri: nifti file
        the registered brain file generated in each registration step.
    atlas: nifti file
        the reference brain file used in each registration step.
    outdir: str
        directory where the output png file is saved.
    loc: int
        which index along the 4th dimension of 4d brain data to use
    mean: bool
        whether to average 4d brain data along the 4th dimension.
        If False, the single volume mri_data[:, :, :, loc] is used
    minthr: int
        lower percentile threshold
    maxthr: int
        upper percentile threshold
    edge: bool
        whether to plot edge maps of the slices instead of the raw
        intensities
    """
atlas_data = nb.load(atlas).get_data()
mri_data = nb.load(mri).get_data()
if mri_data.ndim == 4: # 4d data, so we need to reduce a dimension
if mean:
mr_data = mri_data.mean(axis=3)
else:
mr_data = mri_data[:, :, :, loc]
else: # dim=3
mr_data = mri_data
cmap1 = LinearSegmentedColormap.from_list("mycmap1", ["white", "magenta"])
cmap2 = LinearSegmentedColormap.from_list("mycmap2", ["white", "green"])
fig = plot_overlays(atlas_data, mr_data, [cmap1, cmap2], minthr, maxthr, edge)
# name and save the file
fig.savefig(outdir + "/" + get_filename(mri) + "_2_" + get_filename(atlas) + ".png", format="png")
plt.close()
def plot_brain(brain, minthr=2, maxthr=95, edge=False):
"""
A function to plot a brain.
    Parameters
    ----------
    brain: str, nifti image, numpy.ndarray
        an object to open the data for a registered brain. Can be a string (path to a brain file),
        nibabel.nifti1.Nifti1Image, or a numpy.ndarray.
    minthr: int
        lower percentile threshold
    maxthr: int
        upper percentile threshold
    edge: bool
        whether to plot an edge map of each slice instead of the raw
        intensities
Returns
---------
fbr: matplotlib.figure.Figure
"""
brain = get_braindata(brain)
cmap = LinearSegmentedColormap.from_list("mycmap2", ["white", "green"])
plt.rcParams.update({"axes.labelsize": "x-large", "axes.titlesize": "x-large"})
fbr = plt.figure()
if brain.shape == (182, 218, 182):
x = [78, 90, 100]
y = [82, 107, 142]
z = [88, 103, 107]
else:
brain_volume = brain.shape
x = [int(brain_volume[0] * 0.35), int(brain_volume[0] * 0.51), int(brain_volume[0] * 0.65)]
y = [int(brain_volume[1] * 0.35), int(brain_volume[1] * 0.51), int(brain_volume[1] * 0.65)]
z = [int(brain_volume[2] * 0.35), int(brain_volume[2] * 0.51), int(brain_volume[2] * 0.65)]
coords = (x, y, z)
labs = [
"Sagittal Slice",
"Coronal Slice",
"Axial Slice",
]
var = ["X", "Y", "Z"]
# create subplot for first slice
# and customize all labels
idx = 0
min_val, max_val = get_min_max(brain, minthr, maxthr)
for i, coord in enumerate(coords):
for pos in coord:
idx += 1
ax = fbr.add_subplot(3, 3, idx)
            ax.set_facecolor("white")  # set_axis_bgcolor was removed in matplotlib 2.2+
ax.set_title(var[i] + " = " + str(pos))
if i == 0:
image = ndimage.rotate(brain[pos, :, :], 90)
elif i == 1:
image = ndimage.rotate(brain[:, pos, :], 90)
else:
image = brain[:, :, pos]
if idx % 3 == 1:
ax.set_ylabel(labs[i])
ax.yaxis.set_ticks([0, image.shape[0] / 2, image.shape[0] - 1])
ax.xaxis.set_ticks([0, image.shape[1] / 2, image.shape[1] - 1])
if edge:
image = edge_map(image).data
ax.imshow(
image,
interpolation="none",
cmap=cmap,
alpha=1,
vmin=min_val,
vmax=max_val,
)
fbr.set_size_inches(12.5, 10.5, forward=True)
fbr.tight_layout()
return fbr
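# Hypothetical usage sketch (not part of m2g): render QA slices for a
# synthetic volume; the 91x109x91 shape (an MNI 2 mm grid) and the output
# path are assumptions for illustration only.
def _demo_plot_brain(out_png="demo_brain.png"):
    import numpy as np
    vol = np.random.rand(91, 109, 91)
    fig = plot_brain(vol, minthr=2, maxthr=95, edge=False)
    fig.savefig(out_png, format="png")
    plt.close(fig)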
def plot_overlays(atlas, b0, cmaps=None, minthr=2, maxthr=95, edge=False):
"""
A function to plot the overlay figures of registered and reference brain slices.
    Parameters
    ----------
    atlas: str, nifti image, numpy.ndarray
        an object to open the data for the reference brain. Can be a string (path to a brain file),
        nibabel.nifti1.Nifti1Image, or a numpy.ndarray.
    b0: str, nifti image, numpy.ndarray
        an object to open the data for the registered brain. Can be a string (path to a brain file),
        nibabel.nifti1.Nifti1Image, or a numpy.ndarray.
    cmaps: list of two matplotlib Colormaps
        the first is used for the reference (atlas) image, the second for
        the registered image
    minthr: int
        lower percentile threshold
    maxthr: int
        upper percentile threshold
    edge: bool
        whether to plot edge maps of the slices instead of the raw
        intensities
Returns
---------
foverlay: matplotlib.figure.Figure
"""
plt.rcParams.update({"axes.labelsize": "x-large", "axes.titlesize": "x-large"})
foverlay = plt.figure()
atlas = get_braindata(atlas)
b0 = get_braindata(b0)
if atlas.shape != b0.shape:
raise ValueError("Brains are not the same shape.")
if cmaps is None:
cmap1 = LinearSegmentedColormap.from_list("mycmap1", ["white", "magenta"])
cmap2 = LinearSegmentedColormap.from_list("mycmap2", ["white", "green"])
cmaps = [cmap1, cmap2]
    brain_volume = b0.shape
    padded_size = max(brain_volume[0:3])
    # Pad both images to a cube so that all three slice orientations share
    # the same in-plane extent, then pick slice positions at fixed
    # fractions of the padded size.
    atlas = pad_im(atlas, padded_size, 0, False)
    b0 = pad_im(b0, padded_size, 0, False)
    x = [int(padded_size * 0.35), int(padded_size * 0.51), int(padded_size * 0.65)]
    y = list(x)
    z = list(x)
    coords = (x, y, z)
labs = [
"Sagittal Slice",
"Coronal Slice",
"Axial Slice",
]
var = ["X", "Y", "Z"]
# create subplot for first slice
# and customize all labels
idx = 0
if edge:
min_val = 0
max_val = 1
else:
min_val, max_val = get_min_max(b0, minthr, maxthr)
for i, coord in enumerate(coords):
for pos in coord:
idx += 1
ax = foverlay.add_subplot(3, 3, idx)
ax.set_title(var[i] + " = " + str(pos))
if i == 0:
image = ndimage.rotate(b0[pos, :, :], 90)
atl = ndimage.rotate(atlas[pos, :, :], 90)
elif i == 1:
image = ndimage.rotate(b0[:, pos, :], 90)
atl = ndimage.rotate(atlas[:, pos, :], 90)
else:
image = b0[:, :, pos]
atl = atlas[:, :, pos]
if idx % 3 == 1:
ax.set_ylabel(labs[i])
ax.yaxis.set_ticks([0, image.shape[0] / 2, image.shape[0] - 1])
ax.xaxis.set_ticks([0, image.shape[1] / 2, image.shape[1] - 1])
if edge:
image = edge_map(image).data
image[image > 0] = max_val
image[image == 0] = min_val
#Set the axis invisible
plt.xticks([])
plt.yticks([])
#Set the frame invisible
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.imshow(atl, interpolation="none", cmap=cmaps[0], alpha=0.9)
ax.imshow(
opaque_colorscale(
cmaps[1], image, alpha=0.9, vmin=min_val, vmax=max_val
)
)
            #set the legend; the magenta layer is the reference atlas,
            #the green layer is the registered image
            if idx == 3:
                plt.plot(0, 0, "-", c="pink", label='reference')
                plt.plot(0, 0, "-", c="green", label='registered')
                plt.legend(loc='best', fontsize=12, bbox_to_anchor=(1.5, 1.5))
#Set title for the whole picture
a,b,c = brain_volume
title = 'QA For Registration\nVolume:'+ str(a) +'*'+ str(b) + '*' + str(c)+'\n'
foverlay.suptitle(title,fontsize=24)
foverlay.set_size_inches(12.5, 10.5, forward=True)
foverlay.tight_layout()
return foverlay
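# Hypothetical usage sketch (not part of m2g): overlay two synthetic volumes
# of the same shape; in real use the first argument is the reference atlas
# and the second the registered image.
def _demo_plot_overlays(out_png="demo_overlay.png"):
    import numpy as np
    ref = np.random.rand(91, 109, 91)
    reg = np.random.rand(91, 109, 91)
    fig = plot_overlays(ref, reg, minthr=2, maxthr=95, edge=False)
    fig.savefig(out_png, format="png")
    plt.close(fig)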
|
|
'''
MathDoku solver module.
@author: Radian Baskoro
'''
import itertools
from datetime import datetime
from Utility import Utility
class Solver:
'''
Solver class used to solve the MathDoku problem.
'''
__debugLevel = 0
__iterationCount = 0
__initFlag = False
boardSize = 0
cages = None
def __init__(self, debugLevel=0):
'''
Constructor for Solver class.
Accepts debugLevel:
0 - No debug information
1 - Number of iterations and elapsed time only
2 - All information
'''
self.__debugLevel = debugLevel
def initializeFromFile(self, filePath):
'''
Initializes the problem from file.
'''
inputDataFile = open(filePath, 'r')
inputData = ''.join(inputDataFile.readlines())
inputDataFile.close()
self.initialize(inputData)
def initialize(self, inputData):
'''
Initializes the problem from string.
'''
lines = inputData.split('\n')
parts = lines[0].split()
self.boardSize = int(parts[0])
cageCount = int(parts[1])
self.cages = [None]*cageCount
try:
for i in range(self.boardSize+1, self.boardSize+cageCount+1):
parts = lines[i].split()
cage = Cage()
cage.op = parts[1]
cage.value = int(parts[2])
self.cages[int(parts[0])] = cage
for i in range(0, self.boardSize):
parts = lines[i+1].split()
for j in range(0, self.boardSize):
cageIndex = int(parts[j])
self.cages[cageIndex].cells.append([i,j])
except Exception:
raise InputError('Expecting board size of %d and %d cages.' % (self.boardSize, cageCount))
self.__validate()
self.__initFlag = True
def solve(self):
'''
Solves the initialized problem.
Returns the solution in a 2 dimensional list or None if no solution is found.
'''
if not self.__initFlag: raise SolverError('Solver not initialized')
startTime = datetime.now()
domain = [[range(1, self.boardSize+1) for y in range(0, self.boardSize)] for x in range(0, self.boardSize)]
conf = [[None]*self.boardSize for x in range(0, self.boardSize)]
for cage in self.cages:
# First pass - assign all single cell cages
if len(cage.cells) == 1:
x = cage.cells[0][0]
y = cage.cells[0][1]
self.__setValue(domain, conf, x, y, cage.value)
# Second pass - heuristics
# Cage using multiplication operation: domains must be a factor of the cage value
elif cage.op == Op.MULTIPLICATION:
for cell in cage.cells:
x = cell[0]
y = cell[1]
toRemove = set()
for d in domain[x][y]:
if cage.value % d != 0:
toRemove.add(d)
domain[x][y] = list(set(domain[x][y])-toRemove)
# Propagating constraints also removes all infeasible number combinations from each cage
self.__propagateConstraints(domain, conf)
# Third pass - greedy
tree = SolverNode(domain, conf)
if self.__solveTree(tree):
if self.__debugLevel >= 1:
elapsedTime = datetime.now() - startTime
print 'Solved in %d iterations.' % self.__iterationCount
print 'Elapsed time: %d seconds %d microseconds' % \
(elapsedTime.seconds, elapsedTime.microseconds)
return tree.conf
else:
if self.__debugLevel >= 1:
print 'No solution found.'
return None
def __solveTree(self, node):
'''
Recursively solve the problem tree.
'''
self.__iterationCount += 1
if self.__debugLevel >= 2:
print '----------------'
print 'Iteration %d' % self.__iterationCount
print '----------------'
print Utility.formatSolution(node.conf)
# Assign next value, and check constraints
if node.value != None:
self.__setValue(node.domain, node.conf, node.x, node.y, node.value)
if not ConstraintStore.checkConstraints(self.boardSize, self.cages, node.domain, node.conf, debugLevel=self.__debugLevel):
return False
# Done if all cells are assigned
unassignedCells = filter(lambda x: node.conf[x[0]][x[1]] == None, itertools.product(range(0, self.boardSize), range(0, self.boardSize)))
if len(unassignedCells) == 0:
if self.__debugLevel >= 2: print 'Solution found!'
return True
# Get next cell
unassignedCells = sorted(unassignedCells, key=lambda x: len(node.domain[x[0]][x[1]]))
cell = unassignedCells[0]
x = cell[0]
y = cell[1]
# Try each domain value
for value in node.domain[x][y]:
childNode = SolverNode(node.domain, node.conf, x, y, value)
if self.__solveTree(childNode):
node.domain = childNode.domain
node.conf = childNode.conf
return True
# No solution found in this subtree
return False
def __setValue(self, domain, conf, x, y, value):
'''
Sets the value at the given location and propagate constraints.
'''
conf[x][y] = value
domain[x][y] = [value]
if self.__debugLevel >= 2: print '(%d,%d) = %d' % (x, y, value)
self.__propagateConstraints(domain, conf, x=x, y=y, value=value)
def __propagateConstraints(self, domain, conf, x=None, y=None, value=None):
'''
Limits the domain values based on the known constraints.
'''
removeCount = 0
if x != None and y != None and value != None:
for i in range(0, self.boardSize):
if y != i:
# Propagate to row
d = domain[x][i]
if value in d:
removeCount += 1
d.remove(value)
if len(d) == 1:
self.__setValue(domain, conf, x, i, d[0])
if x != i:
# Propagate to column
d = domain[i][y]
if value in d:
removeCount += 1
d.remove(value)
if len(d) == 1:
self.__setValue(domain, conf, i, y, d[0])
# Propagate to all cages
for cage in self.cages:
cellCount = len(cage.cells)
if len(cage.cells) > 1:
# Try every possible combination of the domain of each cells
# and only keep feasible values
d = tuple(map(lambda cell: domain[cell[0]][cell[1]], cage.cells))
feasibleDomain = [set() for x in range(0, cellCount)]
comb = list(itertools.product(*d))
for c in comb:
cageCalcValue = cage.func(*c)
if float(cage.value) == cageCalcValue:
for i in range(0, cellCount):
feasibleDomain[i].add(c[i])
for i in range(0, cellCount):
cell = cage.cells[i]
x = cell[0]
y = cell[1]
newDomain = list(set(domain[x][y])&feasibleDomain[i])
removeCount += len(domain[x][y])-len(newDomain)
domain[x][y] = newDomain
if self.__debugLevel >= 2: print "%d infeasible values removed from the domain." % removeCount
def __validate(self):
'''
Initial validation of the problem.
'''
# All cages' cells must be attached to one another.
for i in range(0, len(self.cages)):
cage = self.cages[i]
if len(cage.cells) > 1:
valid = [False]*len(cage.cells)
                # Each cell must have at least one orthogonal neighbour
                # inside the cage.
                for j in range(0, len(cage.cells)):
                    if not valid[j]:
                        c1 = cage.cells[j]
                        for k in range(0, len(cage.cells)):
                            c2 = cage.cells[k]
                            if abs(c1[0]-c2[0])+abs(c1[1]-c2[1]) == 1:
                                valid[j] = True
                                valid[k] = True
                if False in valid: raise InputError('Cage #%d cells are not attached.' % i)
# Check for invalid operation
if not cage.op in [Op.ADDITION, Op.SUBTRACTION, Op.MULTIPLICATION, Op.DIVISION]: raise InputError ('Invalid operation in cage #%d: %s' % (i, cage.op))
class Cage:
'''
Represents a cage of MathDoku cells with an operation and value.
'''
cells = None
op = None
value = None
def __init__(self):
self.cells = list()
def func(self, *n):
'''
Executes the cage function on the given values tuple.
'''
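        # Sort descending so '-' and '/' are applied largest-first: the cage
        # result is then independent of cell order (e.g. a '/' cage with
        # target 2 over the values {2, 4} evaluates 4 / 2, never 2 / 4).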
values = sorted(n, reverse=True)
value = float(values[0])
for v in values[1:]:
if self.op == Op.ADDITION: value += v
elif self.op == Op.SUBTRACTION: value -= v
elif self.op == Op.MULTIPLICATION: value *= v
elif self.op == Op.DIVISION: value /= v
return value
class Op:
'''
Valid operators enumeration.
'''
ADDITION = '+'
    SUBTRACTION = '-'
MULTIPLICATION = '*'
DIVISION = '/'
class SolverNode:
'''
Represents the solver tree node.
'''
domain = None
conf = None
x = None
y = None
value = None
def __init__(self, domain, conf, x=None, y=None, value=None):
self.domain = list()
self.conf = list()
for i in range(0, len(domain)):
dRow = list()
cRow = list(conf[i])
for j in range(0, len(domain)):
dRow.append(list(domain[i][j]))
self.domain.append(dRow)
self.conf.append(cRow)
self.x = x
self.y = y
self.value = value
class ConstraintStore:
'''
Constraint store static class for checking feasibility.
'''
@staticmethod
def checkConstraints(boardSize, cages, domain, conf, debugLevel=0):
'''
Returns true if none of the constraints are broken, false otherwise.
'''
completeDomain = set(range(1, boardSize+1))
for i in range(0, boardSize):
rowUsed = list()
colUsed = list()
rowDomain = set()
colDomain = set()
for j in range(0, boardSize):
# 1 - Row values are all different
value = conf[i][j]
if value != None:
if value in rowUsed:
if debugLevel >= 2: print 'Row #%d constraint violated.' % i
return False
else:
rowUsed.append(value)
# 2 - Column values are all different
value = conf[j][i]
if value != None:
if value in colUsed:
if debugLevel >= 2: print 'Column #%d constraint violated.' % i
return False
else:
colUsed.append(value)
rowDomain = rowDomain.union(set(domain[i][j]))
colDomain = colDomain.union(set(domain[j][i]))
#3 - Row must contain all numbers
if rowDomain != completeDomain:
if debugLevel >= 2: print 'Row #%i domain constraint violated.' % i
return False
#4 - Column must contain all numbers
if colDomain != completeDomain:
if debugLevel >= 2: print 'Column #%i domain constraint violated.' % i
return False
# 3 - Cage calculation is correct
for i in range(0, len(cages)):
cage = cages[i]
values = tuple(map(lambda x: conf[x[0]][x[1]], cage.cells))
if not None in values:
cageCalcValue = cage.func(*values)
if float(cage.value) != cageCalcValue:
if debugLevel >= 2: print 'Cage #%d constraint violated.' % i
return False
return True
class InputError(Exception):
'''
Represents an error in the problem space.
'''
def __init__(self, message):
self.message = message
class SolverError(Exception):
'''
Represents an error in the solver.
'''
def __init__(self, message):
self.message = message
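# Hypothetical usage sketch (not from the original module): a 2x2 board
# split into two addition cages, one per row, each summing to 3. The solver
# returns one of the two valid Latin squares, e.g. [[1, 2], [2, 1]].
def _demoSolve():
    inputData = '\n'.join([
        '2 2',    # board size and cage count
        '0 0',    # row 0: both cells belong to cage 0
        '1 1',    # row 1: both cells belong to cage 1
        '0 + 3',  # cage 0: addition, target value 3
        '1 + 3',  # cage 1: addition, target value 3
    ])
    solver = Solver()
    solver.initialize(inputData)
    return solver.solve()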
|
|
# coding: utf-8
"""
Functions for working with pitch data
This file depends on the praat script get_pitch_and_intensity.praat
(which depends on praat) to extract pitch and intensity values from
audio data. Once the data is extracted, there are functions for
data normalization and for calculating various measures from the
time-stamped output of the praat script (i.e., **generatePIMeasures()**)
For brevity, 'pitch_and_intensity' is referred to as 'PI'
see **examples/get_pitch_and_formants.py**
"""
import os
from os.path import join
import io
import math
from typing import List, Tuple, Optional, cast
from praatio import data_points
from praatio import praatio_scripts
from praatio import textgrid
from praatio.utilities import errors
from praatio.utilities import my_math
from praatio.utilities import utils
from praatio.utilities.constants import Point
HERTZ = "Hertz"
UNSPECIFIED = "unspecified"
_PITCH_ERROR_TIER_NAME = "pitch errors"
def _extractPIPiecewise(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
tgFN: str,
tierName: str,
tmpOutputPath: str,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and int from each labeled interval in a textgrid
This has the benefit of being faster than using _extractPIFile if only
labeled regions need to have their pitch values sampled, particularly
for longer files.
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
windowSize = medianFilterWindowSize
if not os.path.exists(inputFN):
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
utils.makeDir(tmpOutputPath)
splitAudioList = praatio_scripts.splitAudioOnTier(
inputFN, tgFN, tierName, tmpOutputPath, False
)
allPIList: List[Tuple[str, str, str]] = []
for start, _, fn in splitAudioList:
tmpTrackName = os.path.splitext(fn)[0] + ".txt"
piList = _extractPIFile(
join(tmpOutputPath, fn),
join(tmpOutputPath, tmpTrackName),
praatEXE,
minPitch,
maxPitch,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate=True,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
convertedPiList = [
("%0.3f" % (float(time) + start), str(pV), str(iV))
for time, pV, iV in piList
]
allPIList.extend(convertedPiList)
outputData = [",".join(row) for row in allPIList]
with open(outputFN, "w") as fd:
fd.write("\n".join(outputData) + "\n")
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def _extractPIFile(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and intensity values from an audio file
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if not os.path.exists(inputFN):
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
argList = [
inputFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
pitchUnit,
-1,
-1,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitch_and_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractIntensity(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
sampleStep: float = 0.01,
forceRegenerate: bool = True,
undefinedValue: float = None,
) -> List[Tuple[float, ...]]:
"""
Extract the intensity for an audio file
Calculates intensity using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if not os.path.exists(inputFN):
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [inputFN, outputFN, sampleStep, minPitch, -1, -1]
scriptName = "get_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPitchTier(
wavFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
forceRegenerate: bool = True,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> data_points.PointObject2D:
"""
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
    forceRegenerate - if False and the output file already exists,
                      just read in the existing pitch file
pitchQuadInterp - if True, quadratically interpolate pitch
Calculates pitch using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if not os.path.exists(wavFN):
        raise errors.ArgumentError(f"Required file does not exist: {wavFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [
wavFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitchtier.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return data_points.open2DPointObject(outputFN)
def extractPitch(
wavFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
    forceRegenerate - if False and the output file already exists,
                      just read in the existing pitch file
    undefinedValue - if None, undefined values are removed from the
                     dataset; otherwise they are set to undefinedValue
pitchQuadInterp - if True, quadratically interpolate pitch
Calculates pitch using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if not os.path.exists(wavFN):
        raise errors.ArgumentError(f"Required file does not exist: {wavFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [
wavFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
-1,
-1,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitch.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPI(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
tgFN: str = None,
tierName: str = None,
tmpOutputPath: str = None,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and intensity from a file wholesale or piecewise
If the parameters for a tg are passed in, this will only extract labeled
segments in a tier of the tg. Otherwise, pitch will be extracted from
the entire file.
male: minPitch=50; maxPitch=350
female: minPitch=75; maxPitch=450
pitchUnit: "Hertz", "semitones re 100 Hz", etc
Calculates pitch and intensity using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
"""
outputPath = os.path.split(outputFN)[0]
windowSize = medianFilterWindowSize
if tgFN is None or tierName is None:
piList = _extractPIFile(
inputFN,
outputFN,
praatEXE,
minPitch,
maxPitch,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
else:
if tmpOutputPath is None:
tmpOutputPath = join(outputPath, "piecewise_output")
piList = _extractPIPiecewise(
inputFN,
outputFN,
praatEXE,
minPitch,
maxPitch,
tgFN,
tierName,
tmpOutputPath,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
return piList
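# Hypothetical usage sketch; the wav path, output path and praat binary
# location are assumptions, not from this file. The pitch range is the
# female range suggested in the docstring above.
def _demoExtractPI() -> List[Tuple[float, ...]]:
    return extractPI(
        "mary.wav",
        "mary_pi.csv",
        "/usr/bin/praat",
        minPitch=75,
        maxPitch=450,
    )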
def loadTimeSeriesData(
fn: str, undefinedValue: float = None
) -> List[Tuple[float, ...]]:
"""
For reading the output of get_pitch_and_intensity or get_intensity
Data should be of the form
[(time1, value1a, value1b, ...),
(time2, value2a, value2b, ...), ]
"""
name = os.path.splitext(os.path.split(fn)[1])[0]
try:
with io.open(fn, "r", encoding="utf-8") as fd:
data = fd.read()
except IOError:
print(f"No pitch track for: {name}")
raise
dataList = [row.split(",") for row in data.splitlines() if row != ""]
# The new praat script includes a header
if dataList[0][0] == "time":
dataList = dataList[1:]
newDataList = []
for row in dataList:
time = float(row.pop(0))
entry = [
time,
]
doSkip = False
for value in row:
if "--" in value:
if undefinedValue is not None:
appendValue = undefinedValue
else:
doSkip = True
break
else:
appendValue = float(value)
entry.append(appendValue)
if doSkip is True:
continue
newDataList.append(tuple(entry))
return newDataList
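# A minimal runnable sketch (not in the original file): the praat scripts
# write '--undefined--' for unvoiced frames; with undefinedValue set, those
# rows are kept, while with the default of None they are dropped.
def _demoLoadTimeSeriesData() -> List[Tuple[float, ...]]:
    import tempfile
    with tempfile.NamedTemporaryFile(
        "w", suffix=".csv", delete=False, encoding="utf-8"
    ) as fd:
        fd.write("time,pitch\n0.010,120.5\n0.020,--undefined--\n")
        fn = fd.name
    return loadTimeSeriesData(fn, undefinedValue=0.0)  # [(0.01, 120.5), (0.02, 0.0)]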
def generatePIMeasures(
dataList: List[Tuple[float, float, float]],
tgFN: str,
tierName: str,
doPitch: bool,
medianFilterWindowSize: int = None,
globalZNormalization: bool = False,
localZNormalizationWindowSize: int = 0,
) -> List[Tuple[float, ...]]:
"""
Generates processed values for the labeled intervals in a textgrid
    if doPitch is True, pitch measures are generated; if False, rms
    intensity is generated
    medianFilterWindowSize: if None, no filtering is done
    globalZNormalization: if True, values are normalized with the mean
                          and stdDev of the data in dataList
    localZNormalizationWindowSize: if greater than 0, values are
                          normalized with the mean and stdDev of the
                          local context (for a window of 5, it would
                          consider the current value, 2 values before
                          and 2 values after)
"""
    # Normalizing a second time nullifies the first normalization, so refuse to do both
if globalZNormalization is True and localZNormalizationWindowSize > 0:
raise errors.NormalizationException()
castDataList = cast(List[Tuple[float, ...]], dataList)
if globalZNormalization is True:
if doPitch:
castDataList = my_math.znormalizeSpeakerData(castDataList, 1, True)
else:
castDataList = my_math.znormalizeSpeakerData(castDataList, 2, True)
# Raw values should have 0 filtered; normalized values are centered around 0, so don't filter
filterZeroFlag = not globalZNormalization
tg = textgrid.openTextgrid(tgFN, False)
if not isinstance(tg.tierDict[tierName], textgrid.IntervalTier):
raise errors.IncompatibleTierError(tg.tierDict[tierName])
tier = cast(textgrid.IntervalTier, tg.tierDict[tierName])
piData = tier.getValuesInIntervals(castDataList)
outputList: List[List[float]] = []
for interval, entryList in piData:
label = interval[0]
if doPitch:
tmpValList = [f0Val for _, f0Val, _ in entryList]
f0Measures = getPitchMeasures(
tmpValList, tgFN, label, medianFilterWindowSize, filterZeroFlag
)
outputList.append(list(f0Measures))
else:
tmpValList = [intensityVal for _, _, intensityVal in entryList]
if filterZeroFlag:
tmpValList = [
intensityVal for intensityVal in tmpValList if intensityVal != 0.0
]
rmsIntensity = 0.0
if len(tmpValList) != 0:
rmsIntensity = my_math.rms(tmpValList)
outputList.append(
[
rmsIntensity,
]
)
# Locally normalize the output
if localZNormalizationWindowSize > 0 and len(outputList) > 0:
for colI in range(len(outputList[0])):
featValList = [row[colI] for row in outputList]
featValList = my_math.znormWindowFilter(
featValList, localZNormalizationWindowSize, True, True
)
if len(featValList) != len(outputList): # This should hopefully not happen
raise errors.UnexpectedError(
"Lists must be of the same length but are not: "
f"({len(featValList)}), ({len(outputList)})"
)
for i, val in enumerate(featValList):
outputList[i][colI] = val
return [tuple(row) for row in outputList]
def getPitchMeasures(
f0Values: List[float],
name: str = None,
label: str = None,
medianFilterWindowSize: int = None,
filterZeroFlag: bool = False,
) -> Tuple[float, float, float, float, float, float]:
"""
Get various measures (min, max, etc) for the passed in list of pitch values
    name is the name of the file. label is the label of the current interval.
    Both of these are only used for debugging and can be ignored if desired.
    medianFilterWindowSize: None -> no median filtering
    filterZeroFlag: True -> zero values are removed
"""
if name is None:
name = UNSPECIFIED
if label is None:
label = UNSPECIFIED
if medianFilterWindowSize is not None:
f0Values = my_math.medianFilter(
f0Values, medianFilterWindowSize, useEdgePadding=True
)
if filterZeroFlag:
f0Values = [f0Val for f0Val in f0Values if int(f0Val) != 0]
if len(f0Values) == 0:
myStr = f"No pitch data for file: {name}, label: {label}"
print(myStr.encode("ascii", "replace"))
counts = 0.0
meanF0 = 0.0
maxF0 = 0.0
minF0 = 0.0
rangeF0 = 0.0
variance = 0.0
std = 0.0
else:
counts = float(len(f0Values))
meanF0 = sum(f0Values) / counts
maxF0 = max(f0Values)
minF0 = min(f0Values)
rangeF0 = maxF0 - minF0
variance = sum([(val - meanF0) ** 2 for val in f0Values]) / counts
std = math.sqrt(variance)
return (meanF0, maxF0, minF0, rangeF0, variance, std)
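# A minimal sketch (not in the original file): measures for a toy contour.
# Expected: mean=110, max=120, min=100, range=20, variance=200/3, std=sqrt(200/3).
def _demoGetPitchMeasures() -> Tuple[float, float, float, float, float, float]:
    return getPitchMeasures([100.0, 110.0, 120.0])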
def detectPitchErrors(
pitchList: List[Tuple[float, float]],
maxJumpThreshold: float = 0.70,
tgToMark: Optional[textgrid.Textgrid] = None,
) -> Tuple[List[Point], Optional[textgrid.Textgrid]]:
"""
Detect pitch halving and doubling errors.
If a textgrid is passed in, it adds the markings to the textgrid
"""
if maxJumpThreshold < 0 or maxJumpThreshold > 1:
raise errors.ArgumentError(
f"'maxJumpThreshold' must be between 0 and 1. Was given ({maxJumpThreshold})"
)
tierName = _PITCH_ERROR_TIER_NAME
if tgToMark is not None and tierName in tgToMark.tierNameList:
raise errors.ArgumentError(
f"Tier name '{tierName}' is already in provided textgrid"
)
errorList = []
for i in range(1, len(pitchList)):
lastPitch = pitchList[i - 1][1]
currentPitch = pitchList[i][1]
ceilingCutoff = currentPitch / maxJumpThreshold
floorCutoff = currentPitch * maxJumpThreshold
if (lastPitch <= floorCutoff) or (lastPitch >= ceilingCutoff):
currentTime = pitchList[i][0]
errorList.append(Point(currentTime, str(currentPitch / lastPitch)))
if tgToMark is not None:
pointTier = textgrid.PointTier(
tierName, errorList, tgToMark.minTimestamp, tgToMark.maxTimestamp
)
tgToMark.addTier(pointTier)
return errorList, tgToMark
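# A minimal sketch (not in the original file): a jump from 100 Hz to 210 Hz
# trips the default threshold (floorCutoff = 210 * 0.70 = 147 and 100 <= 147),
# so the frame at 0.02 s is flagged with the ratio 2.1.
def _demoDetectPitchErrors() -> List[Point]:
    errorList, _ = detectPitchErrors([(0.01, 100.0), (0.02, 210.0)])
    return errorList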
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.functional_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# pylint: disable=invalid-name
def simple_scoped_fn(a, x):
"""Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(math_ops.add(a, x), two)
@test_util.with_control_flow_v2
class FunctionalOpsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testFoldl_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
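    # foldl threads a = 2 * (a + x) left-to-right with elems[0] as the
    # implicit initializer: 1 -> 6 -> 18 -> 44 -> 98 -> 208.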
self.assertAllEqual(208, self.evaluate(r))
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputDifferentDimsSingleOutput(self):
elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
other_elems = np.array([-1.0, 1.0])
initializer = np.array([0.0, 0.0, 0.0])
r = functional_ops.foldl(lambda a, x: a + x[0] * x[1],
(elems, other_elems), initializer)
self.assertAllEqual([1.0, 2.0, 3.0], self.evaluate(r))
def testFoldl_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(208, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
self.assertAllEqual(450, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(1282, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldr(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldr_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldr(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
def testFoldr_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(450, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(1282, self.evaluate(r))
# pylint: disable=unnecessary-lambda
def testFold_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testMap_Simple(self):
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, name="data")
r = functional_ops.map_fn(
lambda x: math_ops.multiply(math_ops.add(x, 3), 2), elems)
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
def testMapSparseTensor(self):
with self.cached_session():
with self.assertRaises(TypeError):
functional_ops.map_fn(
lambda x: x,
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=constant_op.constant([0, 1, 2]),
dense_shape=[2, 2]))
@test_util.run_in_graph_and_eager_modes
def testMapOverScalarErrors(self):
with self.assertRaisesRegexp(ValueError, "not scalars"):
functional_ops.map_fn(lambda x: x, [1, 2])
with self.assertRaisesRegexp(ValueError, "not a scalar"):
functional_ops.map_fn(lambda x: x, 1)
def testMap_Scoped(self):
with self.cached_session() as sess:
def double_scoped(x):
"""2x with a dummy 2 that is scoped."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(x, two)
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])
r = functional_ops.map_fn(double_scoped, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(doubles, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.map_fn(double_scoped, elems)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(doubles, self.evaluate(r))
def testMap_Grad(self):
with self.cached_session():
param = constant_op.constant(2.0)
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
y = functional_ops.map_fn(
lambda x: math_ops.multiply(math_ops.square(x), param), elems)
r = gradients_impl.gradients(y, param)[0]
self.assertAllEqual(91.0, self.evaluate(r))
r = gradients_impl.gradients(y, elems)[0]
self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testMap_SimpleNotTensor(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: math_ops.multiply(math_ops.add(x, 3), 2), nums)
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testMap_SingleInputMultiOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=(dtypes.int64, dtypes.int64))
self.assertEqual(2, len(r))
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
received = self.evaluate(r)
self.assertAllEqual((nums + 3) * 2, received[0])
self.assertAllEqual(-(nums + 3) * 2, received[1])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiOutputMismatchedDtype(self):
nums = np.array([1, 2, 3, 4, 5, 6])
with self.assertRaisesRegexp(
TypeError, r"two structures don't have the same nested structure"):
# lambda emits tuple, but dtype is a list
functional_ops.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=[dtypes.int64, dtypes.int64])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSingleOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
dtype=dtypes.int64)
self.assertEqual((6,), r.get_shape())
received = self.evaluate(r)
self.assertAllEqual(nums * nums + (-nums), received)
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSameStructureOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(lambda x: (x[1][0], (x[1][1], x[0])),
(nums, (2 * nums, -nums)))
r = [r[0], r[1][0], r[1][1]]
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
self.assertEqual((6,), r[2].get_shape())
received = self.evaluate(r)
self.assertAllEqual(2 * nums, received[0])
self.assertAllEqual(-nums, received[1])
self.assertAllEqual(nums, received[2])
@test_util.run_in_graph_and_eager_modes
def testScan_Simple(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
self.assertAllEqual([1., 2., 6., 24., 120., 720.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Reverse(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems,
reverse=True)
self.assertAllEqual([720., 720., 360., 120., 30., 6.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v,
reverse=True)
self.assertAllEqual([1440., 1440., 720., 240., 60., 12.],
self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = (np.array(1.0), np.array(-1.0))
r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
initializer)
r_value = self.evaluate(r)
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
(elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSameTypeOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
(elems, -elems))
r_value = self.evaluate(r)
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiOutputMismatchedInitializer(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
    # fn emits a pair while the initializer is a scalar, so the nested structures mismatch
with self.assertRaisesRegexp(
ValueError, "two structures don't have the same nested structure"):
functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
def testScan_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.scan(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
results = np.array([1, 6, 18, 44, 98, 208])
self.assertAllEqual(results, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
self.assertEqual(len(variables.trainable_variables()), 1)
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScanFoldl_Nested(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
inner_elems = constant_op.constant([0.5, 0.5], name="data")
def r_inner(a, x):
return functional_ops.foldl(
lambda b, y: b * y * x, inner_elems, initializer=a)
r = functional_ops.scan(r_inner, elems)
# t == 0 (returns 1)
# t == 1, a == 1, x == 2 (returns 1)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
# t_1 == 1, b == 1, y == 0.5, returns b * y * x = 1
# t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
# t_1 == 1, b == 1.5, y == 0.5, returns b * y * x = 1.5*1.5
# t == 3, a == 2.25, x == 4 (returns 9)
# t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
# t_1 == 1, b == 4.5, y == 0.5, returns b * y * x = 9
self.assertAllClose([1., 1., 2.25, 9.], self.evaluate(r))
def testScan_Control(self):
with self.cached_session() as sess:
s = array_ops.placeholder(dtypes.float32, shape=[None])
b = array_ops.placeholder(dtypes.bool)
with ops.control_dependencies([b]):
c = functional_ops.scan(lambda a, x: x * a, s)
self.assertAllClose(
np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
b: True}))
def testScan_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
# pylint: enable=unnecessary-lambda
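      # Each output is v * cumprod(elems)[i]; summing d(output)/dv over all
      # six outputs gives cumprod(elems).sum() = 1+2+6+24+120+720 = 873.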
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(873.0, self.evaluate(r))
def testScanGradientWithPartStopGradient(self):
a = variables.Variable(0.0, name="a")
b = variables.Variable(0.0, name="b")
elems = array_ops.zeros(5)
l0, l1 = functional_ops.scan(
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
sess.run(grad)
@test_util.run_in_graph_and_eager_modes
def testFoldShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.foldl(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_in_graph_and_eager_modes
def testMapShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
y = functional_ops.map_fn(lambda e: e, x)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
def testMapUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
y = functional_ops.map_fn(lambda e: e, x)
self.assertIs(None, y.get_shape().dims)
@test_util.disable_control_flow_v2("b/119323354")
@test_util.run_in_graph_and_eager_modes
def testMapEmptyScalar(self):
map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([]))
self.assertAllEqual([0], map_return.get_shape().dims)
self.assertAllEqual([0], self.evaluate(map_return).shape)
  # TODO(akshayka): this test fails in eager: the iterable is of length 0,
  # so the body of the while loop never executes
@test_util.disable_control_flow_v2("b/119323354")
def testMapEmptyTensor(self):
with self.cached_session():
map_return = functional_ops.map_fn(lambda x: array_ops.zeros([3, 2]),
constant_op.constant([]))
self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
self.assertAllEqual([0, 3, 2], self.evaluate(map_return).shape)
@test_util.run_in_graph_and_eager_modes
def testScanShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
  # TODO(akshayka): this test fails in eager: the iterable is of length 0,
  # so the body of the while loop never executes
def testScanEmptyTensor(self):
with self.cached_session():
x = functional_ops.scan(
lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
self.assertAllEqual([0, 2, 4], x.get_shape())
self.assertAllEqual(x.get_shape(), self.evaluate(x).shape)
def testScanUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
initializer = array_ops.placeholder(dtypes.float32)
def fn(_, current_input):
return current_input
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertIs(None, y.get_shape().dims)
def testScanVaryingShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
x_t = array_ops.transpose(x)
# scan over dimension 0 (with shape None)
result = functional_ops.scan(lambda a, x: a + x, x)
# scanned over transposed dimension 0 (with shape 2)
result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
# ensure gradients can be calculated
result_grad = gradients_impl.gradients(result, [x])[0]
result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]
# smoke test to ensure they all evaluate
sess.run([result, result_t, result_grad, result_t_grad],
feed_dict={x: [[1.0, 2.0]]})
def testRemoteFunction(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:worker/replica:0/task:0/cpu:1")
with session.Session(worker[0].target) as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
def testRemoteFunctionDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:1")
with self.test_session(config=worker_config) as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
def testRemoteFunctionSameDeviceDirectSession(self):
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b], Tout=[dtypes.int32], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
def testRemoteFunctionCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/device:GPU:0")[0] + 3.0
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, 9.0)
def testRemoteFunctionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:0")[0] + 3.0
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, 9.0)
def testRemoteFunctionGPUCPUStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.string)
def _remote_fn(inp):
return array_ops.identity(inp)
a = array_ops.constant("a")
with ops.device("/gpu:0"):
remote_op = functional_ops.remote_call(
args=[a], Tout=[dtypes.string], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
ret = sess.run(remote_op)
self.assertAllEqual(ret, [b"a"])
def testRemoteFunctionCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:worker/replica:0/task:1/cpu:0")[0] + 3.0
with session.Session(workers[0].target) as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, 9)
def testIf(self):
@function.Defun(dtypes.float32)
def Twice(x):
return x * 2
@function.Defun(dtypes.float32)
def Thrice(x):
return x * 3 + 1
with self.test_session(use_gpu=False) as sess:
x = array_ops.placeholder(dtypes.float32)
ret = functional_ops.If(math_ops.greater(x, 0), [x], Twice, Thrice)[0]
self.assertAllEqual(sess.run(ret, feed_dict={x: 9.}), 18.)
self.assertAllEqual(sess.run(ret, feed_dict={x: -8.}), -23.)
self.assertAllEqual(sess.run(ret, feed_dict={x: 0.}), 1.)
def testWhile(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0.], Cond, Body))[1]
with self.session(graph=g, use_gpu=use_gpu) as sess:
self.assertAllEqual(Run(sess, 20.), 210.)
self.assertAllEqual(Run(sess, 100.), 5050.)
def testWhileLowering(self):
def Run(n, fetch_by_name):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
# outputs: [0, n*(n+1)/2]
outputs = functional_ops.While([n, 0.], Cond, Body, name="my_while")
# `outputs` is the list of output tensors of the While op. We
# arbitrarily choose the 0th tensor to get the While op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
if not fetch_by_name:
fetch = outputs[1]
else:
fetch = "my_while:1"
with self.session(graph=g, use_gpu=use_gpu) as sess:
return sess.run(fetch)
self.assertAllEqual(Run(20., False), 210.)
self.assertAllEqual(Run(20., True), 210.)
self.assertAllEqual(Run(100., False), 5050.)
self.assertAllEqual(Run(100., True), 5050.)
def testWhileError(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def CondReturnsTooManyArgs(n, x):
return n > 0, x
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
@function.Defun(*[dtypes.float32] * 2)
def BodyReturnsTooManyArgs(n, x):
return n - 1, x + n, x
with self.session(graph=g, use_gpu=use_gpu):
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Expected a single scalar.*got 2 tensors."):
functional_ops.While([5., 0.], CondReturnsTooManyArgs,
Body)[0].eval()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"While loop body returned 3 arguments. Expected: 2"):
functional_ops.While([5., 0.], Cond,
BodyReturnsTooManyArgs)[0].eval()
def testWhileInMultipleSubgraphs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, x): # pylint: disable=unused-argument
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
with self.session(graph=g, use_gpu=use_gpu) as sess:
n = array_ops.placeholder(dtypes.float32)
_, result = functional_ops.While([n, 0.], Cond, Body)
c = constant_op.constant(37.)
self.assertAllEqual(210., sess.run(result, feed_dict={n: 20.}))
self.assertAllEqual(5050., sess.run(result, feed_dict={n: 100.}))
# Test that the result is the same when we run a different subgraph.
self.assertAllEqual(5050.,
sess.run([result, c], feed_dict={n: 100.})[0])
# pylint: disable=cell-var-from-loop
def testWhileCapturedInputs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
v = variables.Variable(1.0)
def TestCond(n, *args):
del args
return n < 10
@function.Defun(*[dtypes.float32] * 2)
def TestUnary(n, x):
return math_ops.add(n, 1), x + n + v
@function.Defun(*[dtypes.float32] * 3)
def TestBinary(n, x, x2):
return math_ops.add(n, 1), x + n + v, x2 + v
with self.session(graph=g, use_gpu=use_gpu) as sess:
result_unary = functional_ops.While(
[1.0, 0.],
function.Defun(*[dtypes.float32] * 2)(TestCond), TestUnary)
result_binary = functional_ops.While(
[1.0, 0., 0.],
function.Defun(*[dtypes.float32] * 3)(TestCond), TestBinary)
sess.run(variables.global_variables_initializer())
assert len(result_unary) == 2
self.assertEqual([10.0, 54.0], sess.run(result_unary))
assert len(result_binary) == 3
self.assertEqual([10.0, 54.0, 9.0], sess.run(result_binary))
def TestCondCapture(n, *args):
del args
return math_ops.to_float(n) + v < 10
with self.assertRaises(ValueError):
_ = functional_ops.While(
[1],
function.Defun(dtypes.int32)(TestCondCapture),
function.Defun(dtypes.int32, dtypes.float32)(TestUnary))
# pylint: enable=cell-var-from-loop
def _tfSum(self, use_gpu, rewrite_with_while):
with ops.Graph().as_default() as g:
with self.session(graph=g, use_gpu=use_gpu) as sess:
@function.Defun(dtypes.int32, dtypes.float32)
def Body(n, x):
return x + math_ops.to_float(n)
xs = [
# 1 + 2 + ... + 20
functional_ops.For(
1, 21, 1, [0.], Body, rewrite_with_while=rewrite_with_while)[0],
# 100 + 99 + ... + 1
functional_ops.For(
100, 0, -1, [0.], Body, rewrite_with_while=rewrite_with_while)
[0],
]
xvals = sess.run(xs)
self.assertAllEqual(210, xvals[0])
self.assertAllEqual(5050, xvals[1])
def testFor(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, False)
def testForWithWhile(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, True)
def testForWithWhileNaming(self):
g = ops.Graph()
with g.as_default():
@function.Defun(dtypes.int32, dtypes.float32, func_name="TestBody")
def TestBody(n, x):
return x + math_ops.to_float(n)
_ = functional_ops.For(
1, 21, 1, [0.], TestBody, rewrite_with_while=True)[0]
names = []
for func in g.as_graph_def().library.function:
names.append(func.signature.name)
self.assertTrue("TestBody" in names)
self.assertTrue("TestBody_Cond" in names)
self.assertTrue("TestBody_Body" in names)
def testForCapturedInputs(self):
v = variables.Variable(1.0)
@function.Defun(dtypes.int32)
def TestNullary(n):
v + math_ops.to_float(n) # pylint: disable=expression-not-assigned
@function.Defun(dtypes.int32, dtypes.float32)
def TestUnary(n, x):
return x + math_ops.to_float(n) + v
@function.Defun(dtypes.int32, dtypes.float32, dtypes.float32)
def TestBinary(n, x, x2):
return x + math_ops.to_float(n) + v, x2 + v
for rewrite_with_while in (True, False):
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu) as sess:
result_nullary = functional_ops.For(
1, 10, 1, [], TestNullary,
rewrite_with_while=rewrite_with_while)
result_unary = functional_ops.For(
1, 10, 1, [0.], TestUnary,
rewrite_with_while=rewrite_with_while)
result_binary = functional_ops.For(
1, 10, 1, [0., 0.], TestBinary,
rewrite_with_while=rewrite_with_while)
sess.run(variables.global_variables_initializer())
assert not result_nullary
# The nullary variant doesn't return anything so we can't easily run it.
# As a total hack, fetch the operation by name and run it.
sess.run(ops.get_default_graph().get_operation_by_name(
"While" if rewrite_with_while else "For"))
assert len(result_unary) == 1
self.assertEqual([54.0], sess.run(result_unary))
assert len(result_binary) == 2
self.assertEqual([54.0, 9.0], sess.run(result_binary))
def _tfMLP(self, xval, wsval, bsval, rewrite_with_while):
# On GPU, don't rewrite using a while loop.
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu):
@function.Defun(dtypes.int32, *[dtypes.float64] * 3)
def MLP(i, a, ws, bs):
a = math_ops.tanh(math_ops.matmul(a, ws[i, :]) + bs[i, :])
return a, ws, bs
ret = functional_ops.For(
0,
wsval.shape[0],
1, [xval, wsval, bsval],
MLP,
rewrite_with_while=rewrite_with_while)[0]
return self.evaluate(ret)
def _npMLP(self, xval, wsval, bsval):
for i in range(wsval.shape[0]):
xval = np.tanh(np.dot(xval, wsval[i, :]) + bsval[i, :])
return xval
def _testForMLP(self, rewrite_with_while):
# We construct a 5-layer Multi-Layer Perceptron network here.
    # Each layer has the same number of hidden units (3), and the
# activation function is tanh(). We feed the input (xval) with
# batch size 2.
xval = np.random.normal(size=(2, 3))
wsval = np.random.normal(size=(5, 3, 3))
bsval = np.random.normal(size=(5, 3))
np_ans = self._npMLP(xval, wsval, bsval)
tf_for_ans = self._tfMLP(xval, wsval, bsval, rewrite_with_while)
self.assertAllClose(np_ans, tf_for_ans)
def testForMLP(self):
self._testForMLP(False)
def testForMLPWhile(self):
self._testForMLP(True)
def testForError(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(i, v):
return math_ops.to_float(i) + v
@function.Defun(dtypes.int32, dtypes.float32)
def ReturnsTooManyArgs(unused_i, v):
return v, v
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a scalar"):
functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Invalid start/limit/delta"):
functional_ops.For(0, 10, -1, [0.0], Foo)[0].eval()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"For loop body returned 2 arguments. Expected: 1"):
functional_ops.For(0, 10, 1, [0.0], ReturnsTooManyArgs)[0].eval()
def testGradient(self):
@function.Defun(dtypes.float32)
def Poly(x):
# y = 2x^3+3x^2+4x+8
return 2 * x * x * x + 3 * x * x + 4 * x + 8
@function.Defun(dtypes.float32)
def Grad(x):
# dy/dx = dy/dy * dy/dx = 1.0 * (6x^2+6x+4)
return functional_ops.Gradient([x, 1.0], Poly)[0]
with self.test_session(use_gpu=False) as sess:
a = constant_op.constant(0.)
avals = [Poly(a), Grad(a)]
b = constant_op.constant(1.)
bvals = [Poly(b), Grad(b)]
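      # Worked values: Poly(0) = 8, Poly'(0) = 4; Poly(1) = 2+3+4+8 = 17,
      # Poly'(1) = 6+6+4 = 16.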
self.assertAllEqual(sess.run(avals), [8., 4.])
self.assertAllEqual(sess.run(bvals), [17., 16.])
# TODO(akshayka): Replace `function.Defun` with `tf.contrib.eager.defun` in the
# test cases below.
class PartitionedCallTest(test.TestCase):
def testBasicSingleDevice(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/cpu:0"):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
def testBasicMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
# if x = 1, y = 2, ...
with ops.device("/cpu:0"):
# a:= 1 + 1 = 2
a = x + x
with ops.device("/cpu:1"):
# b:= 2 + 2 = 4
b = a + y
with ops.device("/cpu:2"):
# c:= 2 + 4 = 6
c = a + b
# a + b + c = 2 + 4 + 6 = 12
return a + b + c
with self.test_session(config=config):
output, = functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body)
self.assertEqual(output.eval(), 12.)
def testBasicMultiDeviceGPU(self):
if not test_util.is_gpu_available():
return
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/gpu:0"):
a = x + x
b = y + y
with ops.device("/cpu:0"):
c = a + b
return c
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
def testBasicNoDeviceAnnotations(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
def testShardsRunOnRequestedDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 4})
@function.Defun()
def Body():
# Serialize DT_RESOURCE handles as DT_STRINGs, which encode the device on
# which the resource was created, so that we can verify that ops were
# actually run on the requested devices.
#
# TODO(akshayka): Provide a cleaner, more idiomatic API for obtaining the
# name of the device on which a resource lives / for determining the
# device on which an op ran.
with ops.device("/cpu:0"):
s1 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:1"):
s2 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:2"):
s3 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
return s1, s2, s3
with self.test_session(config=config, use_gpu=True) as sess:
outputs = sess.run(functional_ops.partitioned_call(args=[], f=Body))
self.assertIn(compat.as_bytes("CPU:0"), outputs[0])
self.assertIn(compat.as_bytes("CPU:1"), outputs[1])
self.assertIn(compat.as_bytes("CPU:2"), outputs[2])
def testAssignAddResourceVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.Defun()
def AssignAdd():
v.assign_add(1.0)
op = functional_ops.partitioned_call(
args=AssignAdd.captured_inputs, f=AssignAdd)
_ = self.evaluate(variables.global_variables_initializer())
_ = self.evaluate(op)
value = self.evaluate(v.read_value())
self.assertEqual(value, 2.0)
def testFunctionWithResourcesOnDifferentDevices(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
with ops.device("/cpu:0"):
v_cpu_zero = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_zero")
with ops.device("/cpu:1"):
v_cpu_one = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_one")
with ops.device("/gpu:0"):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_gpu")
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_zero, [1, 2]))
also_cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_one, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, also_cpu_result, gpu_result
defined = function.Defun()(sum_gather)
with self.test_session(
config=config_pb2.ConfigProto(
allow_soft_placement=False,
log_device_placement=True,
device_count={"CPU": 2})) as sess:
sess.run(variables.global_variables_initializer())
expected = sess.run(sum_gather())
result = sess.run(
functional_ops.partitioned_call(
args=defined.captured_inputs, f=defined))
self.assertAllEqual(expected, result)
# Use an invalid executor name to test the plumbing of the executor_type attr.
def testExecutorTypeAttrExecutorNotFound(self):
@function.Defun(dtypes.int32)
def AddFive(x):
return x + 5
op = functional_ops.partitioned_call(
args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
f=AddFive,
executor_type="NON_EXISTENT_EXECUTOR")
with self.assertRaisesRegexp(errors.NotFoundError,
"NON_EXISTENT_EXECUTOR"):
self.evaluate(op)
if __name__ == "__main__":
test.main()
# pylint: enable=invalid-name
|
|
from distutils.core import setup, Extension
from distutils.sysconfig import get_python_lib
import os, os.path
import sys
if sys.platform == "darwin":
    # Don't create resource (metadata) files when building tars on OS X.
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
setup_args = {}
def add_command_class(name, cls):
cmdclasses = setup_args.get('cmdclass', {})
cmdclasses[name] = cls
setup_args['cmdclass'] = cmdclasses
from distutils.command.sdist import sdist as sdist_orig
class sdist(sdist_orig):
def run(self):
self.force_manifest = 1
if (sys.platform != "win32" and
os.path.isdir('.git')):
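            # Snapshot the current git revision into .gitrev (presumably so the
            # sdist can be traced back to the commit it was built from).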
assert os.system("git show-ref -s HEAD > .gitrev") == 0
sdist_orig.run(self)
add_command_class('sdist', sdist)
if sys.version_info[0] >= 3:
import lib2to3.refactor
from distutils.command.build_py \
import build_py_2to3 as build_py
# need to convert sources to Py3 on installation
fixers = [ fix for fix in lib2to3.refactor.get_fixers_from_package("lib2to3.fixes")
if fix.split('fix_')[-1] not in ('next',)
]
build_py.fixer_names = fixers
add_command_class("build_py", build_py)
pxd_include_dirs = [
directory for directory, dirs, files in os.walk('Cython/Includes')
if '__init__.pyx' in files or '__init__.pxd' in files
or directory == 'Cython/Includes' or directory == 'Cython/Includes/Deprecated']
pxd_include_patterns = [
p+'/*.pxd' for p in pxd_include_dirs ] + [
p+'/*.pyx' for p in pxd_include_dirs ]
if sys.version_info < (2,4):
install_base_dir = get_python_lib(prefix='')
import glob
patterns = pxd_include_patterns + [
'Cython/Plex/*.pxd',
'Cython/Compiler/*.pxd',
'Cython/Runtime/*.pyx'
]
setup_args['data_files'] = [
(os.path.dirname(os.path.join(install_base_dir, pattern)),
[ f for f in glob.glob(pattern) ])
for pattern in patterns
]
else:
setup_args['package_data'] = {
'Cython.Plex' : ['*.pxd'],
'Cython.Compiler' : ['*.pxd'],
'Cython.Runtime' : ['*.pyx', '*.pxd'],
        'Cython.Utility' : ['*.pyx', '*.pxd', '*.c', '*.h', '*.cpp'],
'Cython' : [ p[7:] for p in pxd_include_patterns ],
}
# This dict is used for passing extra arguments that are
# setuptools-specific to setup().
setuptools_extra_args = {}
# tells whether to include cygdb (the script and the Cython.Debugger package);
# the debugger requires Python 2.6 or later
include_debugger = sys.version_info[:2] > (2, 5)
if 'setuptools' in sys.modules:
setuptools_extra_args['zip_safe'] = False
setuptools_extra_args['entry_points'] = {
'console_scripts': [
'cython = Cython.Compiler.Main:setuptools_main',
]
}
scripts = []
else:
if os.name == "posix":
scripts = ["bin/cython"]
if include_debugger:
scripts.append('bin/cygdb')
else:
scripts = ["cython.py"]
if include_debugger:
scripts.append('cygdb.py')
def compile_cython_modules(profile=False, compile_more=False, cython_with_refnanny=False):
source_root = os.path.abspath(os.path.dirname(__file__))
compiled_modules = ["Cython.Plex.Scanners",
"Cython.Plex.Actions",
"Cython.Compiler.Lexicon",
"Cython.Compiler.Scanning",
"Cython.Compiler.Parsing",
"Cython.Compiler.Visitor",
"Cython.Compiler.FlowControl",
"Cython.Compiler.Code",
"Cython.Runtime.refnanny",
]
if compile_more:
compiled_modules.extend([
"Cython.Compiler.ParseTreeTransforms",
"Cython.Compiler.Nodes",
"Cython.Compiler.ExprNodes",
"Cython.Compiler.ModuleNode",
"Cython.Compiler.Optimize",
])
defines = []
if cython_with_refnanny:
defines.append(('CYTHON_REFNANNY', '1'))
extensions = []
if sys.version_info[0] >= 3:
from Cython.Distutils import build_ext as build_ext_orig
for module in compiled_modules:
source_file = os.path.join(source_root, *module.split('.'))
if os.path.exists(source_file + ".py"):
pyx_source_file = source_file + ".py"
else:
pyx_source_file = source_file + ".pyx"
dep_files = []
if os.path.exists(source_file + '.pxd'):
dep_files.append(source_file + '.pxd')
if '.refnanny' in module:
defines_for_module = []
else:
defines_for_module = defines
extensions.append(
Extension(module, sources = [pyx_source_file],
define_macros = defines_for_module,
depends = dep_files)
)
class build_ext(build_ext_orig):
# we must keep the original modules alive to make sure
# their code keeps working when we remove them from
# sys.modules
dead_modules = []
def build_extensions(self):
# add path where 2to3 installed the transformed sources
# and make sure Python (re-)imports them from there
already_imported = [ module for module in sys.modules
if module == 'Cython' or module.startswith('Cython.') ]
keep_alive = self.dead_modules.append
for module in already_imported:
keep_alive(sys.modules[module])
del sys.modules[module]
sys.path.insert(0, os.path.join(source_root, self.build_lib))
if profile:
from Cython.Compiler.Options import directive_defaults
directive_defaults['profile'] = True
print("Enabled profiling for the Cython binary modules")
build_ext_orig.build_extensions(self)
setup_args['ext_modules'] = extensions
add_command_class("build_ext", build_ext)
else: # Python 2.x
from distutils.command.build_ext import build_ext as build_ext_orig
try:
class build_ext(build_ext_orig):
def build_extension(self, ext, *args, **kargs):
try:
build_ext_orig.build_extension(self, ext, *args, **kargs)
except StandardError:
print("Compilation of '%s' failed" % ext.sources[0])
from Cython.Compiler.Main import compile
from Cython import Utils
if profile:
from Cython.Compiler.Options import directive_defaults
directive_defaults['profile'] = True
print("Enabled profiling for the Cython binary modules")
source_root = os.path.dirname(__file__)
for module in compiled_modules:
source_file = os.path.join(source_root, *module.split('.'))
if os.path.exists(source_file + ".py"):
pyx_source_file = source_file + ".py"
else:
pyx_source_file = source_file + ".pyx"
c_source_file = source_file + ".c"
source_is_newer = False
if not os.path.exists(c_source_file):
source_is_newer = True
else:
c_last_modified = Utils.modification_time(c_source_file)
if Utils.file_newer_than(pyx_source_file, c_last_modified):
source_is_newer = True
else:
pxd_source_file = source_file + ".pxd"
if os.path.exists(pxd_source_file) and Utils.file_newer_than(pxd_source_file, c_last_modified):
source_is_newer = True
if source_is_newer:
print("Compiling module %s ..." % module)
result = compile(pyx_source_file)
c_source_file = result.c_file
if c_source_file:
# Py2 distutils can't handle unicode file paths
if isinstance(c_source_file, unicode):
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
c_source_file = c_source_file.encode(filename_encoding)
if '.refnanny' in module:
defines_for_module = []
else:
defines_for_module = defines
extensions.append(
Extension(module, sources = [c_source_file],
define_macros = defines_for_module)
)
else:
print("Compilation failed")
if extensions:
setup_args['ext_modules'] = extensions
add_command_class("build_ext", build_ext)
except Exception:
print('''
ERROR: %s
Extension module compilation failed, looks like Cython cannot run
properly on this system. To work around this, pass the option
"--no-cython-compile". This will install a pure Python version of
Cython without compiling its own sources.
''' % sys.exc_info()[1])
raise
cython_profile = '--cython-profile' in sys.argv
if cython_profile:
sys.argv.remove('--cython-profile')
try:
sys.argv.remove("--cython-compile-all")
cython_compile_more = True
except ValueError:
cython_compile_more = False
try:
sys.argv.remove("--cython-with-refnanny")
cython_with_refnanny = True
except ValueError:
cython_with_refnanny = False
try:
sys.argv.remove("--no-cython-compile")
compile_cython_itself = False
except ValueError:
compile_cython_itself = True
if compile_cython_itself:
compile_cython_modules(cython_profile, cython_compile_more, cython_with_refnanny)
setup_args.update(setuptools_extra_args)
from Cython import __version__ as version
packages = [
'Cython',
'Cython.Build',
'Cython.Compiler',
'Cython.Runtime',
'Cython.Distutils',
'Cython.Plex',
'Cython.Tests',
'Cython.Build.Tests',
'Cython.Compiler.Tests',
'Cython.Utility',
'Cython.Tempita',
]
if include_debugger:
packages.append('Cython.Debugger')
packages.append('Cython.Debugger.Tests')
# it's enough to do this for Py2.5+:
setup_args['package_data']['Cython.Debugger.Tests'] = ['codefile', 'cfuncs.c']
setup(
name = 'Cython',
version = version,
url = 'http://www.cython.org',
author = 'Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.',
author_email = 'cython-devel@python.org',
description = "The Cython compiler for writing C extensions for the Python language.",
long_description = """\
The Cython language makes writing C extensions for the Python language as
easy as Python itself. Cython is a source code translator based on the
well-known Pyrex_, but supports more cutting edge functionality and
optimizations.
The Cython language is very close to the Python language (and most Python
code is also valid Cython code), but Cython additionally supports calling C
functions and declaring C types on variables and class attributes. This
allows the compiler to generate very efficient C code from Cython code.
This makes Cython the ideal language for writing glue code for external C
libraries, and for fast C modules that speed up the execution of Python
code.
.. _Pyrex: http://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/
""",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: C",
"Programming Language :: Cython",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries :: Python Modules"
],
scripts = scripts,
packages=packages,
# pyximport
py_modules = ["pyximport/__init__",
"pyximport/pyximport",
"pyximport/pyxbuild",
"cython"],
**setup_args
)
|
|
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from panda3d.core import *
from panda3d.direct import *
from otp.avatar import Avatar
from toontown.hood import ZoneUtil
from toontown.launcher import DownloadForceAcknowledge
from toontown.safezone.SafeZoneLoader import SafeZoneLoader
from toontown.safezone.OZPlayground import OZPlayground
from direct.actor import Actor
from direct.interval.IntervalGlobal import *
import random
from toontown.distributed import DelayDelete
from direct.distributed.ClockDelta import *
from otp.otpbase import OTPGlobals
import copy
from toontown.effects import Bubbles
if (__debug__):
import pdb
class OZSafeZoneLoader(SafeZoneLoader):
def __init__(self, hood, parentFSM, doneEvent):
SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
self.musicFile = 'phase_6/audio/bgm/OZ_SZ.ogg'
self.activityMusicFile = 'phase_6/audio/bgm/GS_KartShop.ogg'
self.dnaFile = 'phase_6/dna/outdoor_zone_sz.pdna'
self.safeZoneStorageDNAFile = 'phase_6/dna/storage_OZ_sz.pdna'
self.__toonTracks = {}
del self.fsm
self.fsm = ClassicFSM.ClassicFSM('SafeZoneLoader', [State.State('start', self.enterStart, self.exitStart, ['quietZone', 'playground', 'toonInterior']),
State.State('playground', self.enterPlayground, self.exitPlayground, ['quietZone', 'golfcourse']),
State.State('toonInterior', self.enterToonInterior, self.exitToonInterior, ['quietZone']),
State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['playground', 'toonInterior', 'golfcourse']),
State.State('golfcourse', self.enterGolfCourse, self.exitGolfCourse, ['quietZone', 'playground']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
def load(self):
self.done = 0
self.geyserTrack = None
SafeZoneLoader.load(self)
self.birdSound = map(base.loadSfx, ['phase_4/audio/sfx/SZ_TC_bird1.ogg', 'phase_4/audio/sfx/SZ_TC_bird2.ogg', 'phase_4/audio/sfx/SZ_TC_bird3.ogg'])
self.underwaterSound = base.loadSfx('phase_4/audio/sfx/AV_ambient_water.ogg')
self.swimSound = base.loadSfx('phase_4/audio/sfx/AV_swim_single_stroke.ogg')
self.submergeSound = base.loadSfx('phase_5.5/audio/sfx/AV_jump_in_water.ogg')
geyserPlacer = self.geom.find('**/geyser*')
waterfallPlacer = self.geom.find('**/waterfall*')
binMgr = CullBinManager.getGlobalPtr()
binMgr.addBin('water', CullBinManager.BTFixed, 29)
water = self.geom.find('**/water1*')
water.setTransparency(1)
water.setColorScale(1.0, 1.0, 1.0, 1.0)
water.setBin('water', 51, 1)
pool = self.geom.find('**/pPlane5*')
pool.setTransparency(1)
pool.setColorScale(1.0, 1.0, 1.0, 1.0)
pool.setBin('water', 50, 1)
self.geyserModel = loader.loadModel('phase_6/models/golf/golf_geyser_model')
self.geyserSound = loader.loadSfx('phase_6/audio/sfx/OZ_Geyser.ogg')
self.geyserSoundInterval = SoundInterval(self.geyserSound, node=geyserPlacer, listenerNode=base.camera, seamlessLoop=False, volume=1.0, cutOff=120)
self.geyserSoundNoToon = loader.loadSfx('phase_6/audio/sfx/OZ_Geyser_No_Toon.ogg')
self.geyserSoundNoToonInterval = SoundInterval(self.geyserSoundNoToon, node=geyserPlacer, listenerNode=base.camera, seamlessLoop=False, volume=1.0, cutOff=120)
if self.geyserModel:
self.geyserActor = Actor.Actor(self.geyserModel)
self.geyserActor.loadAnims({'idle': 'phase_6/models/golf/golf_geyser'})
self.geyserActor.reparentTo(render)
self.geyserActor.setPlayRate(8.6, 'idle')
self.geyserActor.loop('idle')
self.geyserActor.setDepthWrite(0)
self.geyserActor.setTwoSided(True, 11)
self.geyserActor.setColorScale(1.0, 1.0, 1.0, 1.0)
self.geyserActor.setBin('fixed', 0)
mesh = self.geyserActor.find('**/mesh_tide1')
joint = self.geyserActor.find('**/uvj_WakeWhiteTide1')
mesh.setTexProjector(mesh.findTextureStage('default'), joint, self.geyserActor)
self.geyserActor.setPos(geyserPlacer.getPos())
self.geyserActor.setZ(geyserPlacer.getZ() - 100.0)
self.geyserPos = geyserPlacer.getPos()
self.geyserPlacer = geyserPlacer
self.startGeyser()
base.sfxPlayer.setCutoffDistance(160)
self.geyserPoolSfx = loader.loadSfx('phase_6/audio/sfx/OZ_Geyser_BuildUp_Loop.ogg')
self.geyserPoolSoundInterval = SoundInterval(self.geyserPoolSfx, node=self.geyserPlacer, listenerNode=base.camera, seamlessLoop=True, volume=1.0, cutOff=120)
self.geyserPoolSoundInterval.loop()
self.bubbles = Bubbles.Bubbles(self.geyserPlacer, render)
self.bubbles.renderParent.setDepthWrite(0)
self.bubbles.start()
self.collBase = render.attachNewNode('collisionBase')
self.geyserCollSphere = CollisionSphere(0, 0, 0, 7.5)
self.geyserCollSphere.setTangible(1)
self.geyserCollNode = CollisionNode('barrelSphere')
self.geyserCollNode.setIntoCollideMask(OTPGlobals.WallBitmask)
self.geyserCollNode.addSolid(self.geyserCollSphere)
self.geyserNodePath = self.collBase.attachNewNode(self.geyserCollNode)
self.geyserNodePath.setPos(self.geyserPos[0], self.geyserPos[1], self.geyserPos[2] - 100.0)
self.waterfallModel = loader.loadModel('phase_6/models/golf/golf_waterfall_model')
if self.waterfallModel:
self.waterfallActor = Actor.Actor(self.waterfallModel)
self.waterfallActor.loadAnims({'idle': 'phase_6/models/golf/golf_waterfall'})
self.waterfallActor.reparentTo(render)
self.waterfallActor.setPlayRate(3.5, 'idle')
self.waterfallActor.loop('idle')
mesh = self.waterfallActor.find('**/mesh_tide1')
joint = self.waterfallActor.find('**/uvj_WakeWhiteTide1')
mesh.setTexProjector(mesh.findTextureStage('default'), joint, self.waterfallActor)
self.waterfallActor.setPos(waterfallPlacer.getPos())
self.accept('clientLogout', self._handleLogout)
return
def exit(self):
self.clearToonTracks()
SafeZoneLoader.exit(self)
self.ignore('clientLogout')
    def startGeyser(self, task = None):
        if hasattr(base.cr, 'DTimer') and base.cr.DTimer:
            # The geyser fires on a fixed 20-second cycle keyed to the
            # distributed timer, so every client sees it erupt together.
            self.geyserCycleTime = 20.0
            useTime = base.cr.DTimer.getTime()
            timeToNextGeyser = self.geyserCycleTime - useTime % self.geyserCycleTime
            taskMgr.doMethodLater(timeToNextGeyser, self.doGeyser, 'geyser Task')
        else:
            # The timer is not available yet; try again shortly.
            taskMgr.doMethodLater(5.0, self.startGeyser, 'start geyser Task')
    def doGeyser(self, task = None):
        if not self.done:
            self.setGeyserAnim()
            # Reschedule for the next cycle boundary.
            useTime = base.cr.DTimer.getTime()
            timeToNextGeyser = self.geyserCycleTime - useTime % self.geyserCycleTime
            taskMgr.doMethodLater(timeToNextGeyser, self.doGeyser, 'geyser Task')
        return task.done
def restoreLocal(self, task = None):
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.collisionsOn()
base.localAvatar.dropShadow.show()
def restoreRemote(self, remoteAv, task = None):
if remoteAv in Avatar.Avatar.ActiveAvatars:
remoteAv.startSmooth()
remoteAv.dropShadow.show()
def setGeyserAnim(self, task = None):
if self.done:
return
maxSize = 0.4 * random.random() + 0.75
time = 1.0
self.geyserTrack = Sequence()
upPos = Vec3(self.geyserPos[0], self.geyserPos[1], self.geyserPos[2])
downPos = Vec3(self.geyserPos[0], self.geyserPos[1], self.geyserPos[2] - 8.0)
avList = copy.copy(Avatar.Avatar.ActiveAvatars)
avList.append(base.localAvatar)
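        # No toon-on-geyser detection is done here, so playSound stays 0 and
        # the "no toon" sound is always used below.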
playSound = 0
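        # Build the eruption: the geyser grows at the bottom, pulses up and
        # down three times, then sinks away; the collision sphere is raised at
        # the start and dropped 100 units at the end.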
self.geyserTrack.append(Func(self.doPrint, 'geyser start'))
self.geyserTrack.append(Func(self.geyserNodePath.setPos, self.geyserPos[0], self.geyserPos[1], self.geyserPos[2]))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, 2.0 * time, 0.75, 0.01), LerpPosInterval(self.geyserActor, 2.0 * time, pos=downPos, startPos=downPos)))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, time, maxSize, 0.75), LerpPosInterval(self.geyserActor, time, pos=upPos, startPos=downPos)))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, 2.0 * time, 0.75, maxSize), LerpPosInterval(self.geyserActor, 2.0 * time, pos=downPos, startPos=upPos)))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, time, maxSize, 0.75), LerpPosInterval(self.geyserActor, time, pos=upPos, startPos=downPos)))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, 2.0 * time, 0.75, maxSize), LerpPosInterval(self.geyserActor, 2.0 * time, pos=downPos, startPos=upPos)))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, time, maxSize, 0.75), LerpPosInterval(self.geyserActor, time, pos=upPos, startPos=downPos)))
self.geyserTrack.append(Parallel(LerpScaleInterval(self.geyserActor, 4.0 * time, 0.01, maxSize), LerpPosInterval(self.geyserActor, 4.0 * time, pos=downPos, startPos=upPos)))
self.geyserTrack.append(Func(self.geyserNodePath.setPos, self.geyserPos[0], self.geyserPos[1], self.geyserPos[2] - 100.0))
self.geyserTrack.append(Func(self.doPrint, 'geyser end'))
self.geyserTrack.start()
if playSound:
self.geyserSoundInterval.start()
else:
self.geyserSoundNoToonInterval.start()
def changeCamera(self, newParent, newPos, newHpr):
camera.reparentTo(newParent)
camera.setPosHpr(newPos, newHpr)
    def doPrint(self, thing):
        # Debugging stub: printing is disabled by the early return.
        return 0
        print thing
def unload(self):
del self.birdSound
SafeZoneLoader.unload(self)
self.done = 1
self.collBase.removeNode()
if self.geyserTrack:
self.geyserTrack.finish()
self.geyserTrack = None
self.geyserActor.cleanup()
self.geyserModel.removeNode()
self.waterfallActor.cleanup()
self.waterfallModel.removeNode()
self.bubbles.destroy()
del self.bubbles
self.geyserPoolSoundInterval.finish()
self.geyserPoolSfx.stop()
self.geyserPoolSfx = None
self.geyserPoolSoundInterval = None
self.geyserSoundInterval.finish()
self.geyserSound.stop()
self.geyserSoundInterval = None
self.geyserSound = None
self.geyserSoundNoToonInterval.finish()
self.geyserSoundNoToon.stop()
self.geyserSoundNoToonInterval = None
self.geyserSoundNoToon = None
return
def enterPlayground(self, requestStatus):
self.playgroundClass = OZPlayground
SafeZoneLoader.enterPlayground(self, requestStatus)
def exitPlayground(self):
taskMgr.remove('titleText')
self.hood.hideTitleText()
SafeZoneLoader.exitPlayground(self)
self.playgroundClass = None
return
def handlePlaygroundDone(self):
status = self.place.doneStatus
self.doneStatus = status
messenger.send(self.doneEvent)
def enteringARace(self, status):
if not status['where'] == 'golfcourse':
return 0
if ZoneUtil.isDynamicZone(status['zoneId']):
return status['hoodId'] == self.hood.hoodId
else:
return ZoneUtil.getHoodId(status['zoneId']) == self.hood.hoodId
def enteringAGolfCourse(self, status):
if not status['where'] == 'golfcourse':
return 0
if ZoneUtil.isDynamicZone(status['zoneId']):
return status['hoodId'] == self.hood.hoodId
else:
return ZoneUtil.getHoodId(status['zoneId']) == self.hood.hoodId
def enterGolfCourse(self, requestStatus):
        if requestStatus.has_key('courseId'):
self.golfCourseId = requestStatus['courseId']
else:
self.golfCourseId = 0
self.accept('raceOver', self.handleRaceOver)
self.accept('leavingGolf', self.handleLeftGolf)
base.transitions.irisOut(t=0.2)
def exitGolfCourse(self):
del self.golfCourseId
def handleRaceOver(self):
print 'you done!!'
def handleLeftGolf(self):
req = {'loader': 'safeZoneLoader',
'where': 'playground',
'how': 'teleportIn',
'zoneId': 6000,
'hoodId': 6000,
'shardId': None}
self.fsm.request('quietZone', [req])
return
def _handleLogout(self):
self.clearToonTracks()
def storeToonTrack(self, avId, track):
self.clearToonTrack(avId)
self.__toonTracks[avId] = track
def clearToonTrack(self, avId):
oldTrack = self.__toonTracks.get(avId)
if oldTrack:
oldTrack.pause()
DelayDelete.cleanupDelayDeletes(oldTrack)
del self.__toonTracks[avId]
def clearToonTracks(self):
keyList = []
for key in self.__toonTracks:
keyList.append(key)
for key in keyList:
if self.__toonTracks.has_key(key):
self.clearToonTrack(key)
|
|
import json as jsonlib
import pytz
from urlparse import urljoin
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import CheckboxInput
from django.template import defaultfilters
from django.utils import translation
from django.utils.encoding import smart_unicode
import commonware.log
import jinja2
from babel.support import Format
from jingo import env, register
# Needed to make sure our own |f filter overrides jingo's built-in one.
from jingo import helpers # noqa
from jingo_minify import helpers as jingo_minify_helpers
from six import text_type
from tower import ugettext as _
from mkt.translations.helpers import truncate
from mkt.translations.utils import get_locale_from_lang
from mkt.site.utils import append_tz, urlparams
log = commonware.log.getLogger('z.mkt.site')
# Registering some utils as filters:
register.filter(urlparams)
@jinja2.contextfunction
@register.function
def css(context, bundle, media=False, debug=None):
if debug is None:
debug = settings.TEMPLATE_DEBUG
# ?debug=true gives you unminified CSS for testing on -dev/prod.
if context['request'].GET.get('debug'):
debug = True
return jingo_minify_helpers.css(bundle, media, debug)
@jinja2.contextfunction
@register.function
def js(context, bundle, debug=None, defer=False, async=False):
if debug is None:
debug = settings.TEMPLATE_DEBUG
# ?debug=true gives you unminified JS for testing on -dev/prod.
if context['request'].GET.get('debug'):
debug = True
return jingo_minify_helpers.js(bundle, debug, defer, async)
@register.function
def no_results():
# This prints a "No results found" message. That's all. Carry on.
t = env.get_template('site/helpers/no_results.html').render()
return jinja2.Markup(t)
@jinja2.contextfunction
@register.function
def market_button(context, product, receipt_type=None, classes=None):
request = context['request']
purchased = False
classes = (classes or []) + ['button', 'product']
reviewer = receipt_type == 'reviewer'
data_attrs = {'manifest_url': product.get_manifest_url(reviewer),
'is_packaged': jsonlib.dumps(product.is_packaged)}
installed = None
if request.user.is_authenticated():
installed_set = request.user.installed_set
installed = installed_set.filter(addon=product).exists()
# Handle premium apps.
if product.has_premium():
# User has purchased app.
purchased = (request.user.is_authenticated() and
product.pk in request.user.purchase_ids())
# App authors are able to install their apps free of charge.
if (not purchased and
request.check_ownership(product, require_author=True)):
purchased = True
if installed or purchased or not product.has_premium():
label = _('Install')
else:
label = product.get_tier_name()
# Free apps and purchased apps get active install buttons.
if not product.is_premium() or purchased:
classes.append('install')
c = dict(product=product, label=label, purchased=purchased,
data_attrs=data_attrs, classes=' '.join(classes))
t = env.get_template('site/helpers/webapp_button.html')
return jinja2.Markup(t.render(c))
def product_as_dict(request, product, purchased=None, receipt_type=None,
src=''):
receipt_url = (reverse('receipt.issue', args=[product.app_slug]) if
receipt_type else product.get_detail_url('record'))
token_url = reverse('generate-reviewer-token', args=[product.app_slug])
src = src or request.GET.get('src', '')
reviewer = receipt_type == 'reviewer'
    # This is the only info we need to render the app buttons on the
    # Reviewer Tools pages.
ret = {
'id': product.id,
'name': product.name,
'categories': product.categories,
'manifest_url': product.get_manifest_url(reviewer),
'recordUrl': urlparams(receipt_url, src=src),
'tokenUrl': token_url,
'is_packaged': product.is_packaged,
'src': src
}
if product.premium:
ret.update({
'price': product.get_price(region=request.REGION.id),
'priceLocale': product.get_price_locale(region=request.REGION.id),
})
if request.user.is_authenticated():
ret['isPurchased'] = purchased
    # Jinja2-escape everything except keys in this whitelist so that booleans
    # are retained for the JSON encoding.
wl = ('categories', 'currencies', 'isPurchased', 'is_packaged', 'previews',
'price', 'priceLocale')
return dict([k, jinja2.escape(v) if k not in wl else v]
for k, v in ret.items())
@register.function
@jinja2.contextfunction
def mkt_breadcrumbs(context, product=None, items=None, crumb_size=40,
add_default=True, cls=None):
"""
Wrapper function for ``breadcrumbs``.
**items**
list of [(url, label)] to be inserted after Add-on.
**product**
Adds the App/Add-on name to the end of the trail. If items are
specified then the App/Add-on will be linked.
**add_default**
Prepends trail back to home when True. Default is True.
"""
if add_default:
crumbs = [(reverse('home'), _('Home'))]
else:
crumbs = []
if product:
if items:
url_ = product.get_detail_url()
else:
# The Product is the end of the trail.
url_ = None
crumbs += [(None, _('Apps')), (url_, product.name)]
if items:
crumbs.extend(items)
if len(crumbs) == 1:
crumbs = []
crumbs = [(u, truncate(label, crumb_size)) for (u, label) in crumbs]
t = env.get_template('site/helpers/breadcrumbs.html').render(
{'breadcrumbs': crumbs, 'cls': cls})
return jinja2.Markup(t)
@register.function
def form_field(field, label=None, tag='div', req=None, opt=False, hint=False,
tooltip=False, some_html=False, cc_startswith=None, cc_for=None,
cc_maxlength=None, grid=False, cls=None, validate=False):
attrs = {}
# Add a `required` attribute so we can do form validation.
# TODO(cvan): Write tests for kumar some day.
if validate and field.field.required:
attrs['required'] = ''
c = dict(field=field, label=label or field.label, tag=tag, req=req,
opt=opt, hint=hint, tooltip=tooltip, some_html=some_html,
cc_startswith=cc_startswith, cc_for=cc_for,
cc_maxlength=cc_maxlength, grid=grid, cls=cls, attrs=attrs)
t = env.get_template('site/helpers/simple_field.html').render(c)
return jinja2.Markup(t)
@register.filter
@jinja2.contextfilter
def timelabel(context, time):
t = env.get_template('site/helpers/timelabel.html').render({'time': time})
return jinja2.Markup(t)
@register.function
def mkt_admin_site_links():
return {
'addons': [
('Fake mail', reverse('zadmin.mail')),
],
'users': [
('Configure groups', reverse('admin:access_group_changelist')),
],
'settings': [
('View site settings', reverse('zadmin.settings')),
('Django admin pages', reverse('zadmin.home')),
],
'tools': [
('View request environment', reverse('mkt.env')),
('View elasticsearch settings', reverse('zadmin.elastic')),
('Purge data from memcache', reverse('zadmin.memcache')),
('Generate error', reverse('zadmin.generate-error')),
('Site Status', reverse('mkt.monitor')),
('Force Manifest Re-validation',
reverse('zadmin.manifest_revalidation'))
],
}
@register.function
@jinja2.contextfunction
def get_doc_template(context, template):
lang = getattr(context['request'], 'LANG', 'en-US')
if lang in settings.AMO_LANGUAGES:
try:
template = env.get_template('%s/%s.html' % (template, lang))
except jinja2.TemplateNotFound:
pass
else:
return jinja2.Markup(template.render())
template = env.get_template('%s/en-US.html' % template)
return jinja2.Markup(template.render())
@register.function
@jinja2.contextfunction
def get_doc_path(context, path, extension):
"""
Gets the path to a localizable document in the current language with
fallback to en-US.
"""
    lang = getattr(context['request'], 'LANG', 'en-US')
    if lang in settings.AMO_LANGUAGES:
        try:
            localized_file_path = '%s/%s.%s' % (path, lang, extension)
            with open(localized_file_path):
                return localized_file_path
        except IOError:
            pass
    # Fall back to the en-US document for unknown languages or missing files.
    return '%s/en-US.%s' % (path, extension)
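# A usage sketch for absolutify below (hypothetical SITE_URL):
#   absolutify('/app/foo') -> 'https://marketplace.firefox.com/app/foo'
#   absolutify('http://example.com/x') -> 'http://example.com/x' (unchanged)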
@register.filter
def absolutify(url, site=None):
"""Takes a URL and prepends the SITE_URL"""
if url.startswith('http'):
return url
else:
return urljoin(site or settings.SITE_URL, url)
def _get_format():
lang = translation.get_language()
return Format(get_locale_from_lang(lang))
@register.filter
def babel_datetime(dt, format='medium'):
return _get_format().datetime(dt, format=format) if dt else ''
@register.filter
def babel_date(date, format='medium'):
return _get_format().date(date, format=format) if date else ''
@register.filter
def is_choice_field(value):
    try:
        return isinstance(value.field.widget, CheckboxInput)
    except AttributeError:
        return False
@register.filter
def numberfmt(num, format=None):
return _get_format().decimal(num, format)
@register.function
@jinja2.contextfunction
def page_title(context, title):
title = smart_unicode(title)
base_title = _('Firefox Marketplace')
return u'%s | %s' % (title, base_title)
@register.filter
def timesince(time):
if not time:
return u''
ago = defaultfilters.timesince(time)
# L10n: relative time in the past, like '4 days ago'
return _(u'{0} ago').format(ago)
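# Usage sketch for url() below ('detail' is a hypothetical view name):
#   url('detail', 'some-app', src='featured') reverses the view and appends
#   ?src=featured; pass host='...' to prefix an absolute host.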
@register.function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
host = kwargs.pop('host', '')
src = kwargs.pop('src', '')
url = '%s%s' % (host, reverse(viewname, args=args, kwargs=kwargs))
if src:
url = urlparams(url, src=src)
return url
@register.filter
def impala_paginator(pager):
t = env.get_template('site/impala_paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
@register.filter
def json(s):
return jsonlib.dumps(s)
@register.function
@jinja2.contextfunction
def media(context, url, key='MEDIA_URL'):
"""Get a MEDIA_URL link with a cache buster querystring."""
if 'BUILD_ID' in context:
build = context['BUILD_ID']
else:
if url.endswith('.js'):
build = context['BUILD_ID_JS']
elif url.endswith('.css'):
build = context['BUILD_ID_CSS']
else:
build = context['BUILD_ID_IMG']
return urljoin(context[key], urlparams(url, b=build))
@register.function
@jinja2.contextfunction
def static(context, url):
"""Get a STATIC_URL link with a cache buster querystring."""
return media(context, url, 'STATIC_URL')
@register.filter
def f(string, *args, **kwargs):
"""This overrides jingo.helpers.f to convert input to unicode if needed.
This is needed because of
https://github.com/jbalogh/jingo/pull/54#issuecomment-36728948
"""
if not isinstance(string, text_type):
string = text_type(string)
return string.format(*args, **kwargs)
def strip_controls(s):
"""
Strips control characters from a string.
"""
# Translation table of control characters.
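    # Newline (10) and carriage return (13) are deliberately preserved.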
control_trans = dict((n, None) for n in xrange(32) if n not in [10, 13])
rv = unicode(s).translate(control_trans)
return jinja2.Markup(rv) if isinstance(s, jinja2.Markup) else rv
@register.function
@jinja2.contextfunction
def prefer_signin(context):
return 'has_logged_in' in context['request'].COOKIES
@register.filter
def isotime(t):
"""Date/Time format according to ISO 8601"""
if not hasattr(t, 'tzinfo'):
return
return append_tz(t).astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
|
|
from datetime import datetime
import re
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas.core.algorithms as algos
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def writeable(request):
return request.param
# Check that take_nd works with writeable arrays (in which case the fast typed
# memoryview implementation is used) and with read-only arrays alike.
@pytest.fixture(
params=[
(np.float64, True),
(np.float32, True),
(np.uint64, False),
(np.uint32, False),
(np.uint16, False),
(np.uint8, False),
(np.int64, False),
(np.int32, False),
(np.int16, False),
(np.int8, False),
(np.object_, True),
(np.bool, False),
]
)
def dtype_can_hold_na(request):
return request.param
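# Fixture of (dtype, fill_value, expected out dtype) triples: filling with a
# value the dtype cannot hold should upcast the result to the expected dtype.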
@pytest.fixture(
params=[
(np.int8, np.int16(127), np.int8),
(np.int8, np.int16(128), np.int16),
(np.int32, 1, np.int32),
(np.int32, 2.0, np.float64),
(np.int32, 3.0 + 4.0j, np.complex128),
(np.int32, True, np.object_),
(np.int32, "", np.object_),
(np.float64, 1, np.float64),
(np.float64, 2.0, np.float64),
(np.float64, 3.0 + 4.0j, np.complex128),
(np.float64, True, np.object_),
(np.float64, "", np.object_),
(np.complex128, 1, np.complex128),
(np.complex128, 2.0, np.complex128),
(np.complex128, 3.0 + 4.0j, np.complex128),
(np.complex128, True, np.object_),
(np.complex128, "", np.object_),
(np.bool_, 1, np.object_),
(np.bool_, 2.0, np.object_),
(np.bool_, 3.0 + 4.0j, np.object_),
(np.bool_, True, np.bool_),
(np.bool_, "", np.object_),
]
)
def dtype_fill_out_dtype(request):
return request.param
class TestTake:
# Standard incompatible fill error.
fill_error = re.compile("Incompatible type for fill_value")
def test_1d_with_out(self, dtype_can_hold_na, writeable):
dtype, can_hold_na = dtype_can_hold_na
data = np.random.randint(0, 2, 4).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with pytest.raises(TypeError, match=self.fill_error):
algos.take_1d(data, indexer, out=out)
# No Exception otherwise.
data.take(indexer, out=out)
def test_1d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all()
assert result[3] == fill_value
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2, 3]] == data[indexer]).all()
assert result.dtype == dtype
def test_2d_with_out(self, dtype_can_hold_na, writeable):
dtype, can_hold_na = dtype_can_hold_na
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with pytest.raises(TypeError, match=self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# No Exception otherwise.
data.take(indexer, out=out, axis=i)
def test_2d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()
assert (result[3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()
assert (result[:, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all()
assert result.dtype == dtype
def test_3d_with_out(self, dtype_can_hold_na):
dtype, can_hold_na = dtype_can_hold_na
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with pytest.raises(TypeError, match=self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# No Exception otherwise.
data.take(indexer, out=out, axis=i)
def test_3d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()
assert (result[3, :, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()
assert (result[:, 3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()
assert (result[:, :, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all()
assert result.dtype == dtype
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = algos.take_1d(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = algos.take_1d(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
tm.assert_numpy_array_equal(result, expected)
result = algos.take_1d(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_bool(self):
arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
# this now accepts a float32 out buffer
out = np.empty((len(indexer), arr.shape[1]), dtype="float32")
algos.take_nd(arr, indexer, out=out) # it works!
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(11045376, 11360736, (5, 3)) * 100000000000
arr = arr.view(dtype="datetime64[ns]")
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(
arr, indexer, out=result2, axis=0, fill_value=datetime(2007, 1, 1)
)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(
arr, indexer, out=result2, axis=1, fill_value=datetime(2007, 1, 1)
)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
def test_take_axis_0(self):
arr = np.arange(12).reshape(4, 3)
result = algos.take(arr, [0, -1])
expected = np.array([[0, 1, 2], [9, 10, 11]])
tm.assert_numpy_array_equal(result, expected)
# allow_fill=True
result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0)
expected = np.array([[0, 1, 2], [0, 0, 0]])
tm.assert_numpy_array_equal(result, expected)
def test_take_axis_1(self):
arr = np.arange(12).reshape(4, 3)
result = algos.take(arr, [0, -1], axis=1)
expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]])
tm.assert_numpy_array_equal(result, expected)
# allow_fill=True
result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0)
expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]])
tm.assert_numpy_array_equal(result, expected)
# GH#26976 make sure we validate along the correct axis
with pytest.raises(IndexError, match="indices are out-of-bounds"):
algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0)
class TestExtensionTake:
# The take method found in pd.api.extensions
def test_bounds_check_large(self):
arr = np.array([1, 2])
with pytest.raises(IndexError):
algos.take(arr, [2, 3], allow_fill=True)
with pytest.raises(IndexError):
algos.take(arr, [2, 3], allow_fill=False)
def test_bounds_check_small(self):
arr = np.array([1, 2, 3], dtype=np.int64)
indexer = [0, -1, -2]
with pytest.raises(ValueError):
algos.take(arr, indexer, allow_fill=True)
result = algos.take(arr, indexer)
expected = np.array([1, 3, 2], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("allow_fill", [True, False])
def test_take_empty(self, allow_fill):
arr = np.array([], dtype=np.int64)
# empty take is ok
result = algos.take(arr, [], allow_fill=allow_fill)
tm.assert_numpy_array_equal(arr, result)
with pytest.raises(IndexError):
algos.take(arr, [0], allow_fill=allow_fill)
def test_take_na_empty(self):
result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0)
expected = np.array([0.0, 0.0])
tm.assert_numpy_array_equal(result, expected)
def test_take_coerces_list(self):
arr = [1, 2, 3]
result = algos.take(arr, [0, 0])
expected = np.array([1, 1])
tm.assert_numpy_array_equal(result, expected)
|
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def metric_accessors():
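"""Exercise the H2O model metric accessors (mse, r2, auc, logloss, ...).
Each accessor is expected to return a plain float when at most one of
train/valid/xval is requested (no flags defaults to the training metric), and a
dict keyed by the requested subset ('train', 'valid', 'xval') otherwise.
"""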
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
# regression
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
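# The model is trained with both a validation frame and 3-fold cross validation,
# so training, validation and cross validation metrics are all available below.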
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# r2
r21 = gbm.r2(train=True, valid=False, xval=False)
assert isinstance(r21, float)
r22 = gbm.r2(train=False, valid=True, xval=False)
assert isinstance(r22, float)
r23 = gbm.r2(train=False, valid=False, xval=True)
assert isinstance(r23, float)
r2 = gbm.r2(train=True, valid=True, xval=False)
assert "train" in r2.keys() and "valid" in r2.keys(), "expected training and validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["valid"]))
assert r2["valid"] == r22
r2 = gbm.r2(train=True, valid=False, xval=True)
assert "train" in r2.keys() and "xval" in r2.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["xval"]))
assert r2["xval"] == r23
r2 = gbm.r2(train=True, valid=True, xval=True)
assert "train" in r2.keys() and "valid" in r2.keys() and "xval" in r2.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(r2["train"]), type(r2["valid"]), type(r2["xval"]))
r2 = gbm.r2(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(r2, float)
assert r2 == r21
r2 = gbm.r2(train=False, valid=True, xval=True)
assert "valid" in r2.keys() and "xval" in r2.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["valid"]), type(r2["xval"]))
# mean_residual_deviance
mean_residual_deviance1 = gbm.mean_residual_deviance(train=True, valid=False, xval=False)
assert isinstance(mean_residual_deviance1, float)
mean_residual_deviance2 = gbm.mean_residual_deviance(train=False, valid=True, xval=False)
assert isinstance(mean_residual_deviance2, float)
mean_residual_deviance3 = gbm.mean_residual_deviance(train=False, valid=False, xval=True)
assert isinstance(mean_residual_deviance3, float)
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=False)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys(), "expected training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]))
assert mean_residual_deviance["valid"] == mean_residual_deviance2
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=False, xval=True)
assert "train" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["xval"]))
assert mean_residual_deviance["xval"] == mean_residual_deviance3
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mean_residual_deviance, float)
assert mean_residual_deviance == mean_residual_deviance1
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=True, xval=True)
assert "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
# binomial
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
distribution = "bernoulli"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col], x=train[predictors], validation_y=valid[response_col], validation_x=valid[predictors], nfolds=3, distribution=distribution, fold_assignment="Random")
# auc
auc1 = gbm.auc(train=True, valid=False, xval=False)
assert isinstance(auc1, float)
auc2 = gbm.auc(train=False, valid=True, xval=False)
assert isinstance(auc2, float)
auc3 = gbm.auc(train=False, valid=False, xval=True)
assert isinstance(auc3, float)
auc = gbm.auc(train=True, valid=True, xval=False)
assert "train" in auc.keys() and "valid" in auc.keys(), "expected training and validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["valid"]))
assert auc["valid"] == auc2
auc = gbm.auc(train=True, valid=False, xval=True)
assert "train" in auc.keys() and "xval" in auc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["xval"]))
assert auc["xval"] == auc3
auc = gbm.auc(train=True, valid=True, xval=True)
assert "train" in auc.keys() and "valid" in auc.keys() and "xval" in auc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(auc["train"]), type(auc["valid"]), type(auc["xval"]))
auc = gbm.auc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(auc, float)
assert auc == auc1
auc = gbm.auc(train=False, valid=True, xval=True)
assert "valid" in auc.keys() and "xval" in auc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["valid"]), type(auc["xval"]))
# roc
(fprs1, tprs1) = gbm.roc(train=True, valid=False, xval=False)
assert isinstance(fprs1, list)
assert isinstance(tprs1, list)
(fprs2, tprs2) = gbm.roc(train=False, valid=True, xval=False)
assert isinstance(fprs2, list)
assert isinstance(tprs2, list)
(fprs3, tprs3) = gbm.roc(train=False, valid=False, xval=True)
assert isinstance(fprs3, list)
assert isinstance(tprs3, list)
roc = gbm.roc(train=True, valid=True, xval=False)
assert "train" in roc.keys() and "valid" in roc.keys(), "expected training and validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple), "expected training and validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["valid"]))
assert roc["valid"][0] == fprs2
assert roc["valid"][1] == tprs2
roc = gbm.roc(train=True, valid=False, xval=True)
assert "train" in roc.keys() and "xval" in roc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["xval"], tuple), "expected training and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["xval"]))
assert roc["xval"][0] == fprs3
assert roc["xval"][1] == tprs3
roc = gbm.roc(train=True, valid=True, xval=True)
assert "train" in roc.keys() and "valid" in roc.keys() and "xval" in roc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "expected training, validation, and cross validation metrics to be tuples, but got {0}, {1}, and {2}".format(type(roc["train"]), type(roc["valid"]), type(roc["xval"]))
(fprs, tprs) = gbm.roc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(fprs, list)
assert isinstance(tprs, list)
assert fprs == fprs1
assert tprs == tprs1
roc = gbm.roc(train=False, valid=True, xval=True)
assert "valid" in roc.keys() and "xval" in roc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "validation and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["valid"]), type(roc["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# giniCoef
giniCoef1 = gbm.giniCoef(train=True, valid=False, xval=False)
assert isinstance(giniCoef1, float)
giniCoef2 = gbm.giniCoef(train=False, valid=True, xval=False)
assert isinstance(giniCoef2, float)
giniCoef3 = gbm.giniCoef(train=False, valid=False, xval=True)
assert isinstance(giniCoef3, float)
giniCoef = gbm.giniCoef(train=True, valid=True, xval=False)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys(), "expected training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["valid"]))
assert giniCoef["valid"] == giniCoef2
giniCoef = gbm.giniCoef(train=True, valid=False, xval=True)
assert "train" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["xval"]))
assert giniCoef["xval"] == giniCoef3
giniCoef = gbm.giniCoef(train=True, valid=True, xval=True)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(giniCoef["train"]), type(giniCoef["valid"]), type(giniCoef["xval"]))
giniCoef = gbm.giniCoef(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(giniCoef, float)
assert giniCoef == giniCoef1
giniCoef = gbm.giniCoef(train=False, valid=True, xval=True)
assert "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["valid"]), type(giniCoef["xval"]))
# F1
F11 = gbm.F1(train=True, valid=False, xval=False)
F12 = gbm.F1(train=False, valid=True, xval=False)
F13 = gbm.F1(train=False, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=False)
F1 = gbm.F1(train=True, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=True)
F1 = gbm.F1(train=False, valid=False, xval=False) # default: return training metrics
F1 = gbm.F1(train=False, valid=True, xval=True)
# F0point5
F0point51 = gbm.F0point5(train=True, valid=False, xval=False)
F0point52 = gbm.F0point5(train=False, valid=True, xval=False)
F0point53 = gbm.F0point5(train=False, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=False)
F0point5 = gbm.F0point5(train=True, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=True)
F0point5 = gbm.F0point5(train=False, valid=False, xval=False) # default: return training metrics
F0point5 = gbm.F0point5(train=False, valid=True, xval=True)
# F2
F21 = gbm.F2(train=True, valid=False, xval=False)
F22 = gbm.F2(train=False, valid=True, xval=False)
F23 = gbm.F2(train=False, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=False)
F2 = gbm.F2(train=True, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=True)
F2 = gbm.F2(train=False, valid=False, xval=False) # default: return training metrics
F2 = gbm.F2(train=False, valid=True, xval=True)
# accuracy
accuracy1 = gbm.accuracy(train=True, valid=False, xval=False)
accuracy2 = gbm.accuracy(train=False, valid=True, xval=False)
accuracy3 = gbm.accuracy(train=False, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=False)
accuracy = gbm.accuracy(train=True, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=True)
accuracy = gbm.accuracy(train=False, valid=False, xval=False) # default: return training metrics
accuracy = gbm.accuracy(train=False, valid=True, xval=True)
# error
error1 = gbm.error(train=True, valid=False, xval=False)
error2 = gbm.error(train=False, valid=True, xval=False)
error3 = gbm.error(train=False, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=False)
error = gbm.error(train=True, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=True)
error = gbm.error(train=False, valid=False, xval=False) # default: return training metrics
error = gbm.error(train=False, valid=True, xval=True)
# precision
precision1 = gbm.precision(train=True, valid=False, xval=False)
precision2 = gbm.precision(train=False, valid=True, xval=False)
precision3 = gbm.precision(train=False, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=False)
precision = gbm.precision(train=True, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=True)
precision = gbm.precision(train=False, valid=False, xval=False) # default: return training metrics
precision = gbm.precision(train=False, valid=True, xval=True)
# mcc
mcc1 = gbm.mcc(train=True, valid=False, xval=False)
mcc2 = gbm.mcc(train=False, valid=True, xval=False)
mcc3 = gbm.mcc(train=False, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=False)
mcc = gbm.mcc(train=True, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=True)
mcc = gbm.mcc(train=False, valid=False, xval=False) # default: return training metrics
mcc = gbm.mcc(train=False, valid=True, xval=True)
# max_per_class_error
max_per_class_error1 = gbm.max_per_class_error(train=True, valid=False, xval=False)
max_per_class_error2 = gbm.max_per_class_error(train=False, valid=True, xval=False)
max_per_class_error3 = gbm.max_per_class_error(train=False, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=False)
max_per_class_error = gbm.max_per_class_error(train=True, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=True)
max_per_class_error = gbm.max_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
max_per_class_error = gbm.max_per_class_error(train=False, valid=True, xval=True)
# confusion_matrix
confusion_matrix1 = gbm.confusion_matrix(train=True, valid=False, xval=False)
confusion_matrix2 = gbm.confusion_matrix(train=False, valid=True, xval=False)
confusion_matrix3 = gbm.confusion_matrix(train=False, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=False)
confusion_matrix = gbm.confusion_matrix(train=True, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=True)
confusion_matrix = gbm.confusion_matrix(train=False, valid=False, xval=False) # default: return training metrics
confusion_matrix = gbm.confusion_matrix(train=False, valid=True, xval=True)
# # plot
# plot1 = gbm.plot(train=True, valid=False, xval=False)
# plot2 = gbm.plot(train=False, valid=True, xval=False)
# plot3 = gbm.plot(train=False, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=False)
# plot = gbm.plot(train=True, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=True)
# plot = gbm.plot(train=False, valid=False, xval=False) # default: return training metrics
# plot = gbm.plot(train=False, valid=True, xval=True)
# # tpr
# tpr1 = gbm.tpr(train=True, valid=False, xval=False)
# tpr2 = gbm.tpr(train=False, valid=True, xval=False)
# tpr3 = gbm.tpr(train=False, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=False)
# tpr = gbm.tpr(train=True, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=True)
# tpr = gbm.tpr(train=False, valid=False, xval=False) # default: return training metrics
# tpr = gbm.tpr(train=False, valid=True, xval=True)
#
# # tnr
# tnr1 = gbm.tnr(train=True, valid=False, xval=False)
# tnr2 = gbm.tnr(train=False, valid=True, xval=False)
# tnr3 = gbm.tnr(train=False, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=False)
# tnr = gbm.tnr(train=True, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=True)
# tnr = gbm.tnr(train=False, valid=False, xval=False) # default: return training metrics
# tnr = gbm.tnr(train=False, valid=True, xval=True)
#
# # fnr
# fnr1 = gbm.fnr(train=True, valid=False, xval=False)
# fnr2 = gbm.fnr(train=False, valid=True, xval=False)
# fnr3 = gbm.fnr(train=False, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=False)
# fnr = gbm.fnr(train=True, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=True)
# fnr = gbm.fnr(train=False, valid=False, xval=False) # default: return training metrics
# fnr = gbm.fnr(train=False, valid=True, xval=True)
#
# # fpr
# fpr1 = gbm.fpr(train=True, valid=False, xval=False)
# fpr2 = gbm.fpr(train=False, valid=True, xval=False)
# fpr3 = gbm.fpr(train=False, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=False)
# fpr = gbm.fpr(train=True, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=True)
# fpr = gbm.fpr(train=False, valid=False, xval=False) # default: return training metrics
# fpr = gbm.fpr(train=False, valid=True, xval=True)
# multinomial
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
distribution = "multinomial"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# hit_ratio_table
hit_ratio_table1 = gbm.hit_ratio_table(train=True, valid=False, xval=False)
hit_ratio_table2 = gbm.hit_ratio_table(train=False, valid=True, xval=False)
hit_ratio_table3 = gbm.hit_ratio_table(train=False, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=False)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=False, xval=False) # default: return training metrics
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=True, xval=True)
# clustering
iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
km = h2o.kmeans(x=iris[0:4],
nfolds=3,
k=3)
# betweenss
betweenss1 = km.betweenss(train=True, valid=False, xval=False)
assert isinstance(betweenss1, float)
betweenss3 = km.betweenss(train=False, valid=False, xval=True)
assert isinstance(betweenss3, float)
betweenss = km.betweenss(train=True, valid=False, xval=True)
assert "train" in betweenss.keys() and "xval" in betweenss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert len(betweenss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert isinstance(betweenss["train"], float) and isinstance(betweenss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(betweenss["train"]), type(betweenss["xval"]))
assert betweenss["xval"] == betweenss3
betweenss = km.betweenss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(betweenss, float)
assert betweenss == betweenss1
# totss
totss1 = km.totss(train=True, valid=False, xval=False)
assert isinstance(totss1, float)
totss3 = km.totss(train=False, valid=False, xval=True)
assert isinstance(totss3, float)
totss = km.totss(train=True, valid=False, xval=True)
assert "train" in totss.keys() and "xval" in totss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert len(totss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert isinstance(totss["train"], float) and isinstance(totss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(totss["train"]), type(totss["xval"]))
assert totss["xval"] == totss3
totss = km.totss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(totss, float)
assert totss == totss1
# tot_withinss
tot_withinss1 = km.tot_withinss(train=True, valid=False, xval=False)
assert isinstance(tot_withinss1, float)
tot_withinss3 = km.tot_withinss(train=False, valid=False, xval=True)
assert isinstance(tot_withinss3, float)
tot_withinss = km.tot_withinss(train=True, valid=False, xval=True)
assert "train" in tot_withinss.keys() and "xval" in tot_withinss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert len(tot_withinss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert isinstance(tot_withinss["train"], float) and isinstance(tot_withinss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(tot_withinss["train"]), type(tot_withinss["xval"]))
assert tot_withinss["xval"] == tot_withinss3
tot_withinss = km.tot_withinss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(tot_withinss, float)
assert tot_withinss == tot_withinss1
# withinss
withinss1 = km.withinss(train=True, valid=False, xval=False)
withinss3 = km.withinss(train=False, valid=False, xval=True)
withinss = km.withinss(train=True, valid=False, xval=True)
withinss = km.withinss(train=False, valid=False, xval=False) # default: return training metrics
# centroid_stats
centroid_stats1 = km.centroid_stats(train=True, valid=False, xval=False)
centroid_stats3 = km.centroid_stats(train=False, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=True, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=False, valid=False, xval=False) # default: return training metrics
# size
size1 = km.size(train=True, valid=False, xval=False)
size3 = km.size(train=False, valid=False, xval=True)
size = km.size(train=True, valid=False, xval=True)
size = km.size(train=False, valid=False, xval=False) # default: return training metrics
if __name__ == "__main__":
tests.run_test(sys.argv, metric_accessors)
|
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for CatchEnvironment."""
from concurrent import futures
from absl.testing import absltest
from dm_env import test_utils
import grpc
import numpy as np
import catch_environment
from dm_env_rpc.v1 import compliance
from dm_env_rpc.v1 import connection as dm_env_rpc_connection
from dm_env_rpc.v1 import dm_env_adaptor
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import dm_env_rpc_pb2_grpc
from dm_env_rpc.v1 import error
from dm_env_rpc.v1 import tensor_utils
class ServerConnection:
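"""Starts an in-process Catch environment gRPC server and connects to it.
The server is bound to a local port with local credentials; `self.connection`
is a dm_env_rpc Connection ready to send requests to it.
"""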
def __init__(self):
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=1))
servicer = catch_environment.CatchEnvironmentService()
dm_env_rpc_pb2_grpc.add_EnvironmentServicer_to_server(
servicer, self._server)
port = self._server.add_secure_port('[::]:0',
grpc.local_server_credentials())
self._server.start()
self._channel = grpc.secure_channel(f'[::]:{port}',
grpc.local_channel_credentials())
grpc.channel_ready_future(self._channel).result()
self.connection = dm_env_rpc_connection.Connection(self._channel)
def close(self):
self.connection.close()
self._channel.close()
self._server.stop(grace=None)
class JoinedServerConnection(ServerConnection):
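"""A ServerConnection that also creates and joins a world on construction.
Exposes the created `world_name` and the join response `specs`; `close()`
leaves and destroys the world before shutting the server down.
"""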
def __init__(self):
super().__init__()
response = self.connection.send(dm_env_rpc_pb2.CreateWorldRequest())
self.world_name = response.world_name
response = self.connection.send(dm_env_rpc_pb2.JoinWorldRequest(
world_name=self.world_name))
self.specs = response.specs
def close(self):
try:
self.connection.send(dm_env_rpc_pb2.LeaveWorldRequest())
self.connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self.world_name))
finally:
super().close()
class CatchDmEnvRpcStepTest(compliance.Step):
@property
def connection(self):
return self._server_connection.connection
@property
def specs(self):
return self._server_connection.specs
def setUp(self):
super().setUp()
self._server_connection = JoinedServerConnection()
def tearDown(self):
self._server_connection.close()
super().tearDown()
class CatchDmEnvRpcCreateAndDestroyWorldTest(compliance.CreateDestroyWorld):
@property
def connection(self):
return self._server_connection.connection
@property
def required_world_settings(self):
"""A string to Tensor mapping of the minimum set of required settings."""
return {}
@property
def invalid_world_settings(self):
"""World creation settings which are invalid in some way."""
return {'invalid_setting': tensor_utils.pack_tensor(123)}
@property
def has_multiple_world_support(self):
"""Does the server support creating more than one world?"""
return False
def setUp(self):
self._server_connection = ServerConnection()
super().setUp()
def tearDown(self):
super().tearDown()
self._server_connection.close()
class CatchDmEnvRpcJoinAndLeaveWorldTest(compliance.JoinLeaveWorld):
@property
def connection(self):
return self._server_connection.connection
@property
def world_name(self):
return self._world_name
@property
def invalid_join_settings(self):
return {'invalid_setting': tensor_utils.pack_tensor(123)}
def setUp(self):
self._server_connection = ServerConnection()
response = self.connection.send(dm_env_rpc_pb2.CreateWorldRequest())
self._world_name = response.world_name
super().setUp()
def tearDown(self):
super().tearDown()
try:
self.connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self.world_name))
finally:
self._server_connection.close()
class CatchDmEnvRpcResetTest(compliance.Reset):
@property
def connection(self):
return self._server_connection.connection
def join_world(self):
"""Joins a world, returning the specs."""
response = self.connection.send(dm_env_rpc_pb2.JoinWorldRequest(
world_name=self.world_name))
return response.specs
@property
def world_name(self):
return self._world_name
def setUp(self):
self._server_connection = ServerConnection()
response = self.connection.send(dm_env_rpc_pb2.CreateWorldRequest())
self._world_name = response.world_name
super().setUp()
def tearDown(self):
super().tearDown()
try:
self.connection.send(dm_env_rpc_pb2.LeaveWorldRequest())
self.connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self.world_name))
finally:
self._server_connection.close()
class CatchDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def setUp(self):
self._server_connection = JoinedServerConnection()
self._connection = self._server_connection.connection
self.world_name = self._server_connection.world_name
self._dm_env = dm_env_adaptor.DmEnvAdaptor(
self._connection, self._server_connection.specs)
super().setUp()
def tearDown(self):
super().tearDown()
self._server_connection.close()
def make_object_under_test(self):
return self._dm_env
class CatchTestSettings(absltest.TestCase):
def setUp(self):
super().setUp()
self._server_connection = ServerConnection()
self._connection = self._server_connection.connection
self._world_name = None
def tearDown(self):
try:
if self._world_name:
self._connection.send(dm_env_rpc_pb2.LeaveWorldRequest())
self._connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name))
finally:
self._server_connection.close()
super().tearDown()
def test_reset_world_seed_setting(self):
self._world_name = self._connection.send(
dm_env_rpc_pb2.CreateWorldRequest(
settings={'seed': tensor_utils.pack_tensor(1234)})).world_name
self._connection.send(
dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name))
step_response = self._connection.send(dm_env_rpc_pb2.StepRequest())
self._connection.send(
dm_env_rpc_pb2.ResetWorldRequest(
world_name=self._world_name,
settings={'seed': tensor_utils.pack_tensor(1234)}))
self.assertEqual(step_response,
self._connection.send(dm_env_rpc_pb2.StepRequest()))
def test_reset_seed_setting(self):
self._world_name = self._connection.send(
dm_env_rpc_pb2.CreateWorldRequest(
settings={'seed': tensor_utils.pack_tensor(1234)})).world_name
self._connection.send(
dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name))
step_response = self._connection.send(dm_env_rpc_pb2.StepRequest())
self._connection.send(
dm_env_rpc_pb2.ResetRequest(
settings={'seed': tensor_utils.pack_tensor(1234)}))
self.assertEqual(step_response,
self._connection.send(dm_env_rpc_pb2.StepRequest()))
class CatchTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._server_connection = ServerConnection()
self._connection = self._server_connection.connection
response = self._connection.send(dm_env_rpc_pb2.CreateWorldRequest())
self._world_name = response.world_name
def tearDown(self):
try:
self._connection.send(dm_env_rpc_pb2.LeaveWorldRequest())
self._connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name))
finally:
self._server_connection.close()
super().tearDown()
def test_can_reset_world_when_joined(self):
self._connection.send(
dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name))
self._connection.send(dm_env_rpc_pb2.ResetWorldRequest())
def test_cannot_reset_world_when_not_joined(self):
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(dm_env_rpc_pb2.ResetWorldRequest())
def test_cannot_step_when_not_joined(self):
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(dm_env_rpc_pb2.StepRequest())
def test_cannot_reset_when_not_joined(self):
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(dm_env_rpc_pb2.ResetRequest())
def test_cannot_join_world_with_wrong_name(self):
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(
dm_env_rpc_pb2.JoinWorldRequest(world_name='wrong_name'))
def test_cannot_create_world_when_world_exists(self):
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(dm_env_rpc_pb2.CreateWorldRequest())
def test_cannot_join_when_no_world_exists(self):
self._connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name))
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(
dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name))
self._connection.send(dm_env_rpc_pb2.CreateWorldRequest())
def test_cannot_destroy_world_when_still_joined(self):
self._connection.send(
dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name))
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name))
def test_cannot_destroy_world_with_wrong_name(self):
with self.assertRaises(error.DmEnvRpcError):
self._connection.send(
dm_env_rpc_pb2.DestroyWorldRequest(world_name='wrong_name'))
class CatchGameTest(absltest.TestCase):
def setUp(self):
super(CatchGameTest, self).setUp()
self._rows = 3
self._cols = 3
self._game = catch_environment.CatchGame(self._rows, self._cols, 1)
def test_draw_board_correct_initial_state(self):
board = self._game.draw_board()
self.assertEqual(board.shape, (3, 3))
def test_draw_board_ball_in_top_row(self):
board = self._game.draw_board()
self.assertIn(1, board[0])
def test_draw_board_bat_in_center_bottom_row(self):
board = self._game.draw_board()
self.assertTrue(np.array_equal([0, 1, 0], board[2]))
def test_update_drops_ball(self):
self._game.update(action=0)
board = self._game.draw_board()
self.assertNotIn(1, board[0])
self.assertIn(1, board[1])
def test_has_terminated_when_ball_hits_bottom(self):
self.assertFalse(self._game.has_terminated())
self._game.update(action=0)
self.assertFalse(self._game.has_terminated())
self._game.update(action=0)
self.assertTrue(self._game.has_terminated())
def test_update_moves_paddle(self):
self._game.update(action=1)
board = self._game.draw_board()
self.assertTrue(np.array_equal([0, 0, 1], board[2]))
def test_cannot_update_game_when_has_terminated(self):
self._game.update(action=0)
self._game.update(action=0)
with self.assertRaises(RuntimeError):
self._game.update(action=0)
def test_no_reward_when_not_terminated(self):
self.assertEqual(0, self._game.reward())
self._game.update(action=0)
self.assertEqual(0, self._game.reward())
self._game.update(action=0)
def test_has_reward_when_terminated(self):
self._game.update(action=0)
self._game.update(action=0)
self.assertNotEqual(0, self._game.reward())
if __name__ == '__main__':
absltest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, SparkSession
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Axis, Label, Name, DataFrameOrSeries
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.series import Series
ERROR_MESSAGE_CANNOT_COMBINE = (
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
class PandasAPIOnSparkAdviceWarning(Warning):
pass
def same_anchor(
this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
"""
Check if the anchors of the given DataFrame or Series are the same or not.
"""
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
if isinstance(this, InternalFrame):
this_internal = this
else:
assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this)
this_internal = this._internal
if isinstance(that, InternalFrame):
that_internal = that
else:
assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that)
that_internal = that._internal
return (
this_internal.spark_frame is that_internal.spark_frame
and this_internal.index_level == that_internal.index_level
and all(
spark_column_equals(this_scol, that_scol)
for this_scol, that_scol in zip(
this_internal.index_spark_columns, that_internal.index_spark_columns
)
)
)
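# Illustrative sketch: two Series taken from the same pandas-on-Spark DataFrame
# share an anchor, while a freshly created DataFrame does not. The frames below
# are hypothetical and the helper is only defined, never invoked.
def _same_anchor_example():  # pragma: no cover
    psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    other = ps.DataFrame({"a": [1, 2, 3]})
    assert same_anchor(psdf["a"], psdf["b"])  # same underlying Spark frame
    assert not same_anchor(psdf["a"], other)  # anchored to a different frame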
def combine_frames(
this: "DataFrame",
*args: DataFrameOrSeries,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
    It returns a DataFrame with the prefixes `this_` and `that_` to distinguish
    the column names coming from each DataFrame.
It internally performs a join operation which can be expensive in general.
So, if `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from pyspark.pandas.config import get_option
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series
if all(isinstance(arg, Series) for arg in args):
assert all(
same_anchor(arg, args[0]) for arg in args
), "Currently only one different DataFrame (from given Series) is supported"
        assert not same_anchor(this, args[0]), "We don't need to combine. All series are in this."
that = args[0]._psdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
assert not same_anchor(
this, args[0]
), "We don't need to combine. `this` and `that` are same."
that = args[0]
else:
raise AssertionError("args should be single DataFrame or " "single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = internal.spark_frame.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS,
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
this_internal = resolve(this._internal, "this")
that_internal = resolve(that._internal, "that")
this_index_map = list(
zip(
this_internal.index_spark_column_names,
this_internal.index_names,
this_internal.index_fields,
)
)
that_index_map = list(
zip(
that_internal.index_spark_column_names,
that_internal.index_names,
that_internal.index_fields,
)
)
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# Note that the order of each element in index_map is guaranteed according to the index
# level.
this_and_that_index_map = list(zip(this_index_map, that_index_map))
this_sdf = this_internal.spark_frame.alias("this")
that_sdf = that_internal.spark_frame.alias("that")
# If the same named index is found, that's used.
index_column_names = []
index_use_extension_dtypes = []
for (
i,
((this_column, this_name, this_field), (that_column, that_name, that_field)),
) in enumerate(this_and_that_index_map):
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = scol_for(this_sdf, this_column)
that_scol = scol_for(that_sdf, that_column)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
column_name = SPARK_INDEX_NAME_FORMAT(i)
index_column_names.append(column_name)
index_use_extension_dtypes.append(
any(field.is_extension_dtype for field in [this_field, that_field])
)
merged_index_scols.append(
F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
)
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)
if preserve_order_column:
order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
else:
order_column = []
joined_df = joined_df.select(
*merged_index_scols,
*(
scol_for(this_sdf, this_internal.spark_column_name_for(label))
for label in this_internal.column_labels
),
*(
scol_for(that_sdf, that_internal.spark_column_name_for(label))
for label in that_internal.column_labels
),
*order_column,
)
index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]
index_columns = set(index_column_names)
new_data_columns = [
col
for col in joined_df.columns
if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
]
schema = joined_df.select(*index_spark_columns, *new_data_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
for struct_field, use_extension_dtypes in zip(
schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
)
]
data_fields = [
InternalField.from_struct_field(
struct_field, use_extension_dtypes=field.is_extension_dtype
)
for struct_field, field in zip(
schema.fields[len(index_spark_columns) :],
this_internal.data_fields + that_internal.data_fields,
)
]
level = max(this_internal.column_labels_level, that_internal.column_labels_level)
def fill_label(label: Optional[Label]) -> List:
if label is None:
return ([""] * (level - 1)) + [None]
else:
return ([""] * (level - len(label))) + list(label)
column_labels = [
tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
column_label_names = (
cast(List[Optional[Label]], [None]) * (1 + level - this_internal.column_labels_level)
) + this_internal.column_label_names
return DataFrame(
InternalFrame(
spark_frame=joined_df,
index_spark_columns=index_spark_columns,
index_names=this_internal.index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
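# Illustrative sketch: with 'compute.ops_on_diff_frames' enabled, combine_frames
# joins the two frames on their index and prefixes the resulting column labels
# with 'this' / 'that'. The frames below are hypothetical and the helper is only
# defined, never invoked.
def _combine_frames_example():  # pragma: no cover
    from pyspark.pandas.config import set_option, reset_option
    set_option("compute.ops_on_diff_frames", True)
    try:
        psdf1 = ps.DataFrame({"a": [1, 2, 3]})
        psdf2 = ps.DataFrame({"a": [4, 5, 6]})
        combined = combine_frames(psdf1, psdf2)
        # column labels are now ('this', 'a') and ('that', 'a')
        assert [label[0] for label in combined._internal.column_labels] == ["this", "that"]
    finally:
        reset_option("compute.ops_on_diff_frames")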
def align_diff_frames(
resolve_func: Callable[
["DataFrame", List[Label], List[Label]], Iterator[Tuple["Series", Label]]
],
this: "DataFrame",
that: "DataFrame",
fillna: bool = True,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from pyspark.pandas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(psdf, this_column_labels, that_column_labels):
... psdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `psdf`.
... this_label = this_column_labels[0] # this is ('a',) from psdf1.
... that_label = that_column_labels[0] # this is ('a',) from psdf2.
... new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_label)
>>>
>>>
>>> align_diff_frames(func, psdf1, psdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
        Otherwise, the missing values are left as they are.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including that columns.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
- inner: Same as 'full' mode; however, internally performs inner join instead.
:return: Aligned DataFrame
"""
from pyspark.pandas.frame import DataFrame
assert how == "full" or how == "left" or how == "inner"
this_column_labels = this._internal.column_labels
that_column_labels = that._internal.column_labels
common_column_labels = set(this_column_labels).intersection(that_column_labels)
# 1. Perform the join given two dataframes.
combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)
# 2. Apply the given function to transform the columns in a batch and keep the new columns.
combined_column_labels = combined._internal.column_labels
that_columns_to_apply: List[Label] = []
this_columns_to_apply: List[Label] = []
additional_that_columns: List[Label] = []
columns_to_keep: List[Union[Series, Column]] = []
column_labels_to_keep: List[Label] = []
for combined_label in combined_column_labels:
for common_label in common_column_labels:
if combined_label == tuple(["this", *common_label]):
this_columns_to_apply.append(combined_label)
break
elif combined_label == tuple(["that", *common_label]):
that_columns_to_apply.append(combined_label)
break
else:
if how == "left" and combined_label in [
tuple(["that", *label]) for label in that_column_labels
]:
                # In this case, we will drop `that_columns` in `columns_to_keep` but pass
                # them later to `func`. `func` should resolve them.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_label)
elif fillna:
columns_to_keep.append(SF.lit(None).cast(DoubleType()).alias(str(combined_label)))
column_labels_to_keep.append(combined_label)
else:
columns_to_keep.append(combined._psser_for(combined_label))
column_labels_to_keep.append(combined_label)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
columns_applied: List[Union[Series, Column]]
column_labels_applied: List[Label]
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
psser_set, column_labels_set = zip(
*resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
)
columns_applied = list(psser_set)
column_labels_applied = list(column_labels_set)
else:
columns_applied = []
column_labels_applied = []
applied: DataFrame = DataFrame(
combined._internal.with_new_columns(
columns_applied + columns_to_keep,
column_labels=column_labels_applied + column_labels_to_keep,
)
)
# 3. Restore the names back and deduplicate columns.
this_labels = OrderedDict()
# Add columns in an order of its original frame.
for this_label in this_column_labels:
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels and this_label == new_label[1:]:
this_labels[new_label[1:]] = new_label
    # After that, we will add the remaining columns.
other_labels = OrderedDict()
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels:
other_labels[new_label[1:]] = new_label
psdf = applied[list(this_labels.values()) + list(other_labels.values())]
psdf.columns = psdf.columns.droplevel()
return psdf
def is_testing() -> bool:
"""Indicates whether Spark is currently running tests."""
return "SPARK_TESTING" in os.environ
def default_session(conf: Optional[Dict[str, Any]] = None) -> SparkSession:
if conf is None:
conf = dict()
builder = SparkSession.builder.appName("pandas-on-Spark")
for key, value in conf.items():
builder = builder.config(key, value)
# Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
# configuration. This is needed with Spark 3.0+.
builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)
if is_testing():
builder.config("spark.executor.allowSparkContext", False)
return builder.getOrCreate()
@contextmanager
def sql_conf(pairs: Dict[str, Any], *, spark: Optional[SparkSession] = None) -> Iterator[None]:
"""
    A convenient context manager that sets `value` to the Spark SQL configuration `key` and
    then restores it back when the context exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
if spark is None:
spark = default_session()
keys = pairs.keys()
new_values = pairs.values()
old_values = [spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
spark.conf.unset(key)
else:
spark.conf.set(key, old_value)
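# Illustrative sketch: using `sql_conf` to temporarily flip a Spark SQL setting;
# the previous value (or unset state) is restored when the block exits. The
# helper is only defined, never invoked.
def _sql_conf_example():  # pragma: no cover
    with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
        pass  # code in this block runs with Arrow execution disabled
    # outside the block the original configuration is back in place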
def validate_arguments_and_invoke_function(
pobj: Union[pd.DataFrame, pd.Series],
pandas_on_spark_func: Callable,
pandas_func: Callable,
input_args: Dict,
) -> Any:
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
    This function validates all the arguments and removes the ones that are not supported,
    provided they are set to their default value (i.e. most likely the user didn't explicitly
    specify them). It throws a TypeError if the user explicitly specifies an argument that is
    not supported by the pandas version available.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
    # Makes a copy since whatever is passed in is likely created by locals(), and we can't delete
# 'self' key from that.
args = input_args.copy()
del args["self"]
if "kwargs" in args:
# explode kwargs
kwargs = args["kwargs"]
del args["kwargs"]
args = {**args, **kwargs}
pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in pandas_on_spark_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
(
"The pandas version [%s] available does not support parameter '%s' "
+ "for function '%s'."
)
% (pd.__version__, param.name, pandas_func.__name__)
)
args["self"] = pobj
return pandas_func(**args)
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = "_lazy_" + fn.__name__
@property
@functools.wraps(fn)
def wrapped_lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
def deleter(self):
if hasattr(self, attr_name):
delattr(self, attr_name)
return wrapped_lazy_property.deleter(deleter)
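# Illustrative sketch: a hypothetical class whose `area` attribute is computed on
# first access, cached on the instance, and cleared again with `del` through the
# generated deleter. The helper is only defined, never invoked.
def _lazy_property_example():  # pragma: no cover
    class Circle:
        def __init__(self, radius):
            self.radius = radius

        @lazy_property
        def area(self):
            return 3.14159 * self.radius ** 2  # evaluated only once

    c = Circle(2.0)
    first = c.area  # computes and caches the value as `_lazy_area`
    second = c.area  # served from the cache, no recomputation
    assert first == second
    del c.area  # drops the cached value; the next access recomputes it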
def scol_for(sdf: SparkDataFrame, column_name: str) -> Column:
"""Return Spark Column for the given column name."""
return sdf["`{}`".format(column_name)]
def column_labels_level(column_labels: List[Label]) -> int:
"""Return the level of the column index."""
if len(column_labels) == 0:
return 1
else:
levels = set(1 if label is None else len(label) for label in column_labels)
assert len(levels) == 1, levels
return list(levels)[0]
def name_like_string(name: Optional[Name]) -> str:
"""
Return the name-like strings from str or tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
label: Label
if name is None:
label = ("__none__",)
elif is_list_like(name):
label = tuple([str(n) for n in name])
else:
label = (str(name),)
return ("(%s)" % ", ".join(label)) if len(label) > 1 else label[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
"""
    Check whether the given tuple can be used as a name.
Examples
--------
>>> is_name_like_tuple(('abc',))
True
>>> is_name_like_tuple((1,))
True
>>> is_name_like_tuple(('abc', 1, None))
True
>>> is_name_like_tuple(('abc', 1, None), check_type=True)
True
>>> is_name_like_tuple((1.0j,))
True
>>> is_name_like_tuple(tuple())
False
>>> is_name_like_tuple((list('abc'),))
False
>>> is_name_like_tuple(('abc', 1, None), allow_none=False)
False
>>> is_name_like_tuple((1.0j,), check_type=True)
False
"""
if value is None:
return allow_none
elif not isinstance(value, tuple):
return False
elif len(value) == 0:
return False
elif not allow_none and any(v is None for v in value):
return False
elif any(is_list_like(v) or isinstance(v, slice) for v in value):
return False
elif check_type:
return all(
v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
)
else:
return True
def is_name_like_value(
value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
"""
    Check whether the given value can be used as a name.
Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
"""
if value is None:
return allow_none
elif isinstance(value, tuple):
return allow_tuple and is_name_like_tuple(
value, allow_none=allow_none, check_type=check_type
)
elif is_list_like(value) or isinstance(value, slice):
return False
elif check_type:
return as_spark_type(type(value), raise_error=False) is not None
else:
return True
def validate_axis(axis: Optional[Axis] = 0, none_axis: int = 0) -> int:
"""Check the given axis is valid."""
# convert to numeric axis
axis = cast(Dict[Optional[Axis], int], {None: none_axis, "index": 0, "columns": 1}).get(
axis, axis
)
if axis in (none_axis, 0, 1):
return cast(int, axis)
else:
raise ValueError("No axis named {0}".format(axis))
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
"""Ensures that argument passed in arg_name is of type bool."""
if not (isinstance(value, bool) or value is None):
raise TypeError(
'For argument "{}" expected type bool, received '
"type {}.".format(arg_name, type(value).__name__)
)
return value
def validate_how(how: str) -> str:
"""Check the given how for join is valid."""
if how == "full":
warnings.warn(
"Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
+ "instead to be compatible with the pandas merge API",
UserWarning,
)
if how == "outer":
# 'outer' in pandas equals 'full' in Spark
how = "full"
if how not in ("inner", "left", "right", "full"):
raise ValueError(
"The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']",
)
return how
def validate_mode(mode: str) -> str:
"""Check the given mode for writing is valid."""
if mode in ("w", "w+"):
# 'w' in pandas equals 'overwrite' in Spark
        # '+' is meaningless for writing methods, but pandas just passes it as 'w'.
mode = "overwrite"
if mode in ("a", "a+"):
# 'a' in pandas equals 'append' in Spark
        # '+' is meaningless for writing methods, but pandas just passes it as 'a'.
mode = "append"
if mode not in (
"w",
"a",
"w+",
"a+",
"overwrite",
"append",
"ignore",
"error",
"errorifexists",
):
raise ValueError(
"The 'mode' parameter has to be amongst the following values: ",
"['w', 'a', 'w+', 'a+', 'overwrite', 'append', 'ignore', 'error', 'errorifexists']",
)
return mode
@overload
def verify_temp_column_name(df: SparkDataFrame, column_name_or_label: str) -> str:
...
@overload
def verify_temp_column_name(df: "DataFrame", column_name_or_label: Name) -> Label:
...
def verify_temp_column_name(
df: Union["DataFrame", SparkDataFrame], column_name_or_label: Union[str, Name]
) -> Union[str, Label]:
"""
Verify that the given column name does not exist in the given pandas-on-Spark or
Spark DataFrame.
The temporary column names should start and end with `__`. In addition, `column_name_or_label`
expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
>>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
>>> psdf["__dummy__"] = 0
>>> psdf[("", "__dummy__")] = 1
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x __dummy__
a __dummy__
0 a 0 1
1 b 0 1
2 c 0 1
>>> verify_temp_column_name(psdf, '__tmp__')
('__tmp__', '')
>>> verify_temp_column_name(psdf, ('', '__tmp__'))
('', '__tmp__')
>>> verify_temp_column_name(psdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `(__dummy__, )` ...
>>> verify_temp_column_name(psdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: ... `(, __dummy__)` ...
>>> verify_temp_column_name(psdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
>>> verify_temp_column_name(psdf, ('', 'dummy'))
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
>>> internal = psdf._internal.resolved_copy
>>> sdf = internal.spark_frame
>>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
+------+---------+-------------+
|(x, a)|__dummy__|(, __dummy__)|
+------+---------+-------------+
| a| 0| 1|
| b| 0| 1|
| c| 0| 1|
+------+---------+-------------+
>>> verify_temp_column_name(sdf, '__tmp__')
'__tmp__'
>>> verify_temp_column_name(sdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
>>> verify_temp_column_name(sdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: <class 'tuple'>
>>> verify_temp_column_name(sdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should start and end with `__`: dummy
"""
from pyspark.pandas.frame import DataFrame
if isinstance(df, DataFrame):
if isinstance(column_name_or_label, str):
column_name = column_name_or_label
level = df._internal.column_labels_level
column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
else:
column_name = name_like_string(column_name_or_label)
assert any(len(label) > 0 for label in column_name_or_label) and all(
label == "" or (label.startswith("__") and label.endswith("__"))
for label in column_name_or_label
), "The temporary column name should be empty or start and end with `__`: {}".format(
column_name_or_label
)
assert all(
column_name_or_label != label for label in df._internal.column_labels
), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
name_like_string(column_name_or_label), df.columns
)
df = df._internal.resolved_copy.spark_frame
else:
assert isinstance(column_name_or_label, str), type(column_name_or_label)
assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
"__"
), "The temporary column name should start and end with `__`: {}".format(
column_name_or_label
)
column_name = column_name_or_label
assert isinstance(df, SparkDataFrame), type(df)
assert (
column_name not in df.columns
), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
column_name, df.columns
)
return column_name_or_label
def spark_column_equals(left: Column, right: Column) -> bool:
"""
Check both `left` and `right` have the same expressions.
>>> spark_column_equals(SF.lit(0), SF.lit(0))
True
>>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 1)
True
>>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 2)
False
>>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
True
>>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
False
"""
return left._jc.equals(right._jc) # type: ignore[operator]
def compare_null_first(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNull() & right.isNotNull()
)
def compare_null_last(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNotNull() & right.isNull()
)
def compare_disallow_null(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return left.isNull() | right.isNull() | comp(left, right)
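# Illustrative sketch: the comparators above are meant to be combined with a plain
# column comparison (here `Column.__lt__`) to build null-aware ordering
# expressions, e.g. treating nulls as smaller than any value. The column names
# are hypothetical and the helper is only defined, never invoked.
def _null_aware_less_than(sdf: SparkDataFrame) -> Column:  # pragma: no cover
    left, right = sdf["a"], sdf["b"]
    return compare_null_first(left, right, Column.__lt__)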
def log_advice(message: str) -> None:
"""
Display advisory logs for functions to be aware of when using pandas API on Spark
for the existing pandas/PySpark users who may not be familiar with distributed environments
or the behavior of pandas.
"""
warnings.warn(message, PandasAPIOnSparkAdviceWarning)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.utils
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.utils.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
from django.core.exceptions import ValidationError
from cyder.base.tests import ModelTestMixin
from cyder.core.ctnr.models import Ctnr
from cyder.core.system.models import System
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.range.models import Range
from cyder.cydhcp.constants import STATIC
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.vrf.models import Vrf
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.cname.models import CNAME
from cyder.cydns.domain.models import Domain
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.mx.models import MX
from cyder.cydns.ptr.models import PTR
from cyder.cydns.soa.models import SOA
from cyder.cydns.srv.models import SRV
from cyder.cydns.tests.utils import create_zone, DNSTest
from cyder.cydns.txt.models import TXT
class CNAMETests(DNSTest, ModelTestMixin):
def setUp(self):
super(CNAMETests, self).setUp()
self.vrf = Vrf.objects.create(name='test_vrf')
create_zone('128.in-addr.arpa')
self.ctnr2 = Ctnr.objects.create(name='test_ctnr2')
self.g = create_zone('gz')
self.c_g = create_zone('coo.gz')
self.d = create_zone('dz')
Domain.objects.create(name='cd')
self.whatcd = create_zone('what.cd')
for dom in (self.g, self.c_g, self.d, self.whatcd):
self.ctnr.domains.add(dom)
self.r1 = create_zone('10.in-addr.arpa')
self.r1.save()
self.s = System.objects.create(name='test_system', ctnr=self.ctnr)
self.net1 = Network.objects.create(network_str='10.0.0.0/8')
self.net2 = Network.objects.create(network_str='128.193.1.0/30')
self.sr1 = Range.objects.create(
network=self.net1, range_type=STATIC, start_str='10.0.0.1',
end_str='10.0.0.3')
self.sr2 = Range.objects.create(
network=self.net1, range_type=STATIC, start_str='10.193.1.1',
end_str='10.193.1.2')
self.sr3 = Range.objects.create(
network=self.net2, range_type=STATIC, start_str='128.193.1.1',
end_str='128.193.1.2')
for r in (self.sr1, self.sr2, self.sr3):
self.ctnr.ranges.add(r)
def create_cname(self, **kwargs):
kwargs.setdefault('ctnr', self.ctnr)
return CNAME.objects.create(**kwargs)
@property
def objs(self):
"""Create objects for test_create_delete."""
return (
self.create_cname(
label='a', domain=self.g, target='foo.com'),
self.create_cname(
label='bbbbbbbbbbbbbbbbb', domain=self.c_g,
target='foo.foo.com'),
self.create_cname(
label='c-c-c-c-c-c-c-c-c', domain=self.g, target='foo.com'),
self.create_cname(
label='d1d', domain=self.g, target='foo.com'),
)
def test1_add_glob(self):
self.create_cname(label='*foo', domain=self.g, target='foo.com')
self.create_cname(label='*', domain=self.c_g, target='foo.foo.com')
self.assertRaises(
ValidationError, self.create_cname,
label='*.fo1', domain=self.g, target='foo.com')
self.create_cname(
label='*sadfasfd-asdf', domain=self.g, target='foo.com')
def test2_add_glob(self):
self.create_cname(label='*coo', domain=self.g, target='foo.com')
self.create_cname(label='*', domain=self.c_g, target='foo.com')
def test_soa_condition(self):
self.assertRaises(
ValidationError, self.create_cname,
label='', domain=self.c_g, target='foo.com')
def test_add_bad(self):
self.assertRaises(
ValidationError, self.create_cname,
label='', domain=self.g, target='..foo.com')
def test_add_mx_with_cname(self):
def create_mx():
return MX.objects.create(
label='', domain=self.c_g, ctnr=self.ctnr,
server=('cnamederp1.' + self.c_g.name), priority=2, ttl=2222)
create_mx.name = 'MX'
def create_cname():
return CNAME.objects.create(
label='cnamederp1', domain=self.c_g, ctnr=self.ctnr,
target='foo.com')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_mx, create_cname))
def test_address_record_exists(self):
def create_a():
return AddressRecord.objects.create(
label='testyfoo', ctnr=self.ctnr, domain=self.whatcd,
ip_type='4', ip_str="128.193.1.1")
create_a.name = 'AddressRecord'
def create_cname():
return CNAME.objects.create(
label='testyfoo', ctnr=self.ctnr, domain=self.whatcd,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_a, create_cname))
def test_address_record_exists_uppercase(self):
def create_a():
return AddressRecord.objects.create(
label='testyfoo', ctnr=self.ctnr, domain=self.whatcd,
ip_type='4', ip_str="128.193.1.1")
create_a.name = 'AddressRecord'
def create_cname():
return CNAME.objects.create(
label='Testyfoo', ctnr=self.ctnr, domain=self.whatcd,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_a, create_cname))
def test_srv_exists(self):
def create_srv():
return SRV.objects.create(
label='_testyfoo', ctnr=self.ctnr, domain=self.whatcd,
target='asdf', port=2, priority=2, weight=4)
create_srv.name = 'SRV'
def create_cname():
return CNAME.objects.create(
label='_testyfoo', ctnr=self.ctnr, domain=self.whatcd,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_srv, create_cname))
def test_txt_exists(self):
def create_txt():
return TXT.objects.create(
label='testyfoo', domain=self.whatcd, ctnr=self.ctnr,
txt_data='asdf')
create_txt.name = 'TXT'
def create_cname():
return CNAME.objects.create(
label='testyfoo', domain=self.whatcd, ctnr=self.ctnr,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_txt, create_cname))
def test_mx_exists(self):
def create_mx():
return MX.objects.create(
label='testyfoo', domain=self.whatcd, ctnr=self.ctnr,
server='asdf', priority=123, ttl=123)
create_mx.name = 'MX'
def create_cname():
return CNAME.objects.create(
label='testyfoo', domain=self.whatcd, ctnr=self.ctnr,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_mx, create_cname))
def test_ns_exists(self):
bleh = Domain.objects.create(name='bleh.what.cd')
self.ctnr.domains.add(bleh)
def create_ns():
return Nameserver.objects.create(domain=bleh, server='asdf')
create_ns.name = 'Nameserver'
def create_cname():
return CNAME.objects.create(
label='', ctnr=self.ctnr, domain=bleh, target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_ns, create_cname))
def test_intr_exists(self):
def create_static_intr():
return StaticInterface.objects.create(
label='testyfoo', domain=self.whatcd, ip_str='10.0.0.1',
ip_type='4', system=self.s, ctnr=self.ctnr,
mac="11:22:33:44:55:66")
create_static_intr.name = 'StaticInterface'
def create_cname():
return CNAME.objects.create(
label='testyfoo', domain=self.whatcd, ctnr=self.ctnr,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_static_intr, create_cname))
def test_ptr_exists(self):
def create_ptr():
return PTR.objects.create(
ip_str="10.193.1.1", ip_type='4', fqdn='testyfoo.what.cd',
ctnr=self.ctnr)
create_ptr.name = 'PTR'
def create_cname():
return CNAME.objects.create(
label='testyfoo', domain=self.whatcd, ctnr=self.ctnr,
target='wat')
create_cname.name = 'CNAME'
self.assertObjectsConflict((create_ptr, create_cname))
def test_cname_point_to_itself(self):
self.assertRaises(
ValidationError, CNAME.objects.create,
label='foopy', domain=self.whatcd, ctnr=self.ctnr,
target='foopy.what.cd')
def test_domain_ctnr(self):
"""Test that a CNAME's domain must be in the CNAME's container"""
gz = Domain.objects.get(name='gz')
self.ctnr.domains.add(gz)
CNAME.objects.create(
label='bar1', domain=gz, target='foo1.gz', ctnr=self.ctnr)
self.assertRaises(
ValidationError, CNAME.objects.create,
label='bar2', domain=gz, target='foo2.gz', ctnr=self.ctnr2)
def test_name_uniqueness(self):
"""Test that CNAMEs must share a ctnr if they have the same name"""
cn1 = CNAME.objects.create(
label='bar', domain=self.g, target='foo1.gz', ctnr=self.ctnr)
cn2 = CNAME.objects.create(
label='bar', domain=self.g, target='foo2.gz', ctnr=self.ctnr)
self.assertRaises(
ValidationError, CNAME.objects.create,
label='bar', domain=self.g, target='foo3.gz', ctnr=self.ctnr2)
def bootstrap_zone_and_range(self):
d = Domain.objects.create(name='example.gz')
self.ctnr.domains.add(d)
soa = SOA.objects.create(
root_domain=d, primary='ns.example.gz',
contact='root.mail.example.gz')
n = Network.objects.create(
vrf=self.vrf, ip_type='4',
network_str='128.193.0.0/24')
r = Range.objects.create(
network=n, range_type=STATIC, start_str='128.193.0.2',
end_str='128.193.0.100')
self.ctnr.ranges.add(r)
# Cyder has a catch-22 relating to nameservers: If a nameserver's name
# is in the same domain it serves as a nameserver for, a glue record
# must exist before that nameserver can be created, but the nameserver
# must exist before the glue record can be created. Thus, we have to
# set the nameserver's name to something outside the domain it's a
# nameserver for, add the glue record, then fix the nameserver's name.
ns = Nameserver.objects.create(domain=d, server='cyderhack')
glue = AddressRecord.objects.create(
label='ns', domain=d, ip_str='128.193.0.2', ctnr=self.ctnr)
ns.server = 'ns.example.gz'
ns.save()
def test_a_mx_conflict(self):
"""Test that a CNAME cannot have the same name as an AR or MX"""
self.bootstrap_zone_and_range()
e_g = Domain.objects.get(name='example.gz')
def create_cname():
return CNAME.objects.create(
label='foo', domain=e_g, target='bar.example.gz',
ctnr=self.ctnr)
create_cname.name = 'CNAME'
def create_si():
s = System.objects.create(name='test_system', ctnr=self.ctnr)
return StaticInterface.objects.create(
mac='be:ef:fa:ce:11:11', label='foo', domain=e_g,
ip_str='128.193.0.3', ip_type='4', system=s, ctnr=self.ctnr)
create_si.name = 'StaticInterface'
def create_mx():
return MX.objects.create(
label='foo', domain=e_g, server='mail.example.gz', priority=1,
ctnr=self.ctnr)
create_mx.name = 'MX'
self.assertObjectsConflict((create_cname, create_si))
self.assertObjectsConflict((create_cname, create_mx))
def test_soa_conflict(self):
"""Test that a CNAME cannot have the same name as an SOA"""
self.bootstrap_zone_and_range()
f_e_g = Domain.objects.create(name='foo.example.gz')
self.ctnr.domains.add(f_e_g)
def create_cname():
return CNAME.objects.create(
label='', domain=f_e_g.reload(), target='bar.example.gz',
ctnr=self.ctnr)
create_cname.name = 'CNAME'
def create_soa():
return SOA.objects.create(
root_domain=f_e_g.reload(), primary='ns1.example.gz',
contact='root.mail.example.gz')
create_soa.name = 'SOA'
self.assertObjectsConflict((create_cname, create_soa))
def test_target_validation(self):
"""Test that target must be a valid non-IP hostname but need not exist
"""
valid_targets = (
'example.com',
'www.example.com',
'foo.bar.example.com',
)
for target in valid_targets:
cn = CNAME.objects.create(
label='bar', domain=self.g, target=target, ctnr=self.ctnr)
cn.delete()
invalid_targets = (
'10.234.30.253',
'128.193.0.2',
)
for target in invalid_targets:
self.assertRaises(
ValidationError, CNAME.objects.create,
label='bar', domain=self.g, target=target, ctnr=self.ctnr)
def test_staticinterface_conflict(self):
"""Test that a CNAME can't have the same name as a StaticInterface"""
self.bootstrap_zone_and_range()
d = Domain.objects.get(name='example.gz')
def create_cname():
return CNAME.objects.create(
label='foo', domain=d, target='www.example.gz',
ctnr=self.ctnr)
create_cname.name = 'CNAME'
def create_si():
s = System.objects.create(name='test_system', ctnr=self.ctnr)
return StaticInterface.objects.create(
mac='be:ef:fa:ce:11:11', label='foo', domain=d,
ip_str='128.193.0.3', ip_type='4', system=s,
ctnr=self.ctnr)
create_si.name = 'StaticInterface'
self.assertObjectsConflict((create_cname, create_si))
def test_duplicate_cname(self):
def x():
self.create_cname(label='foo', domain=self.g, target='foo.com')
x()
self.assertRaises(ValidationError, x)
|
|
# -*- coding: utf-8 -*-
"""
Tests proxyobject mechanisms with ExampleRawIO
"""
import unittest
import numpy as np
import quantities as pq
from neo.rawio.examplerawio import ExampleRawIO
from neo.io.proxyobjects import (AnalogSignalProxy, SpikeTrainProxy,
EventProxy, EpochProxy)
from neo.core import (Segment, AnalogSignal,
Epoch, Event, SpikeTrain)
from neo.test.tools import (assert_arrays_almost_equal,
assert_neo_object_is_compliant,
assert_same_attributes)
class BaseProxyTest(unittest.TestCase):
def setUp(self):
self.reader = ExampleRawIO(filename='my_filename.fake')
self.reader.parse_header()
class TestAnalogSignalProxy(BaseProxyTest):
def test_AnalogSignalProxy(self):
proxy_anasig = AnalogSignalProxy(rawio=self.reader, global_channel_indexes=None,
block_index=0, seg_index=0,)
assert proxy_anasig.sampling_rate == 10 * pq.kHz
assert proxy_anasig.t_start == 0 * pq.s
assert proxy_anasig.t_stop == 10 * pq.s
assert proxy_anasig.duration == 10 * pq.s
assert proxy_anasig.file_origin == 'my_filename.fake'
# full load
full_anasig = proxy_anasig.load(time_slice=None)
assert isinstance(full_anasig, AnalogSignal)
assert_same_attributes(proxy_anasig, full_anasig)
# slice time
anasig = proxy_anasig.load(time_slice=(2. * pq.s, 5 * pq.s))
assert anasig.t_start == 2. * pq.s
assert anasig.duration == 3. * pq.s
assert anasig.shape == (30000, 16)
assert_same_attributes(proxy_anasig.time_slice(2. * pq.s, 5 * pq.s), anasig)
# ceil next sample when slicing
anasig = proxy_anasig.load(time_slice=(1.99999 * pq.s, 5.000001 * pq.s))
assert anasig.t_start == 2. * pq.s
assert anasig.duration == 3. * pq.s
assert anasig.shape == (30000, 16)
# buggy time slice
with self.assertRaises(AssertionError):
anasig = proxy_anasig.load(time_slice=(2. * pq.s, 15 * pq.s))
anasig = proxy_anasig.load(time_slice=(2. * pq.s, 15 * pq.s), strict_slicing=False)
assert proxy_anasig.t_stop == 10 * pq.s
# select channels
anasig = proxy_anasig.load(channel_indexes=[3, 4, 9])
assert anasig.shape[1] == 3
# select channels and slice times
anasig = proxy_anasig.load(time_slice=(2. * pq.s, 5 * pq.s), channel_indexes=[3, 4, 9])
assert anasig.shape == (30000, 3)
# magnitude mode rescaled
anasig_float = proxy_anasig.load(magnitude_mode='rescaled')
assert anasig_float.dtype == 'float32'
assert anasig_float.units == pq.uV
assert anasig_float.units == proxy_anasig.units
# magnitude mode raw
anasig_int = proxy_anasig.load(magnitude_mode='raw')
assert anasig_int.dtype == 'int16'
assert anasig_int.units == pq.CompoundUnit('0.0152587890625*uV')
assert_arrays_almost_equal(anasig_float, anasig_int.rescale('uV'), 1e-9)
# test array_annotations
assert 'info' in proxy_anasig.array_annotations
assert proxy_anasig.array_annotations['info'].size == 16
assert 'info' in anasig_float.array_annotations
assert anasig_float.array_annotations['info'].size == 16
def test_global_local_channel_indexes(self):
proxy_anasig = AnalogSignalProxy(rawio=self.reader,
global_channel_indexes=slice(0, 10, 2), block_index=0, seg_index=0)
assert proxy_anasig.shape == (100000, 5)
assert '(ch0,ch2,ch4,ch6,ch8)' in proxy_anasig.name
# should be channel ch0 and ch6
anasig = proxy_anasig.load(channel_indexes=[0, 3])
assert anasig.shape == (100000, 2)
assert '(ch0,ch6)' in anasig.name
class TestSpikeTrainProxy(BaseProxyTest):
def test_SpikeTrainProxy(self):
proxy_sptr = SpikeTrainProxy(rawio=self.reader, unit_index=0,
block_index=0, seg_index=0)
assert proxy_sptr.name == 'unit0'
assert proxy_sptr.t_start == 0 * pq.s
assert proxy_sptr.t_stop == 10 * pq.s
assert proxy_sptr.shape == (20,)
assert proxy_sptr.left_sweep == 0.002 * pq.s
assert proxy_sptr.sampling_rate == 10 * pq.kHz
# full load
full_sptr = proxy_sptr.load(time_slice=None)
assert isinstance(full_sptr, SpikeTrain)
assert_same_attributes(proxy_sptr, full_sptr)
assert full_sptr.shape == proxy_sptr.shape
# slice time
sptr = proxy_sptr.load(time_slice=(250 * pq.ms, 500 * pq.ms))
assert sptr.t_start == .25 * pq.s
assert sptr.t_stop == .5 * pq.s
assert sptr.shape == (6,)
assert_same_attributes(proxy_sptr.time_slice(250 * pq.ms, 500 * pq.ms), sptr)
# buggy time slice
with self.assertRaises(AssertionError):
sptr = proxy_sptr.load(time_slice=(2. * pq.s, 15 * pq.s))
sptr = proxy_sptr.load(time_slice=(2. * pq.s, 15 * pq.s), strict_slicing=False)
assert sptr.t_stop == 10 * pq.s
# magnitude mode rescaled
sptr_float = proxy_sptr.load(magnitude_mode='rescaled')
assert sptr_float.dtype == 'float64'
assert sptr_float.units == pq.s
# magnitude mode raw
# TODO when raw mode implemented
# sptr_int = proxy_sptr.load(magnitude_mode='raw')
# assert sptr_int.dtype=='int64'
# assert sptr_int.units==pq.CompoundUnit('1/10000*s')
# assert_arrays_almost_equal(sptr_float, sptr_int.rescale('s'), 1e-9)
# Without waveforms
sptr = proxy_sptr.load(load_waveforms=False)
assert sptr.waveforms is None
# With waveforms
sptr = proxy_sptr.load(load_waveforms=True, magnitude_mode='rescaled')
assert sptr.waveforms is not None
assert sptr.waveforms.shape == (20, 1, 50)
assert sptr.waveforms.units == 1 * pq.uV
# slice waveforms
sptr = proxy_sptr.load(load_waveforms=True, time_slice=(250 * pq.ms, 500 * pq.ms))
assert sptr.waveforms.shape == (6, 1, 50)
class TestEventProxy(BaseProxyTest):
def test_EventProxy(self):
proxy_event = EventProxy(rawio=self.reader, event_channel_index=0,
block_index=0, seg_index=0)
assert proxy_event.name == 'Some events'
assert proxy_event.shape == (6,)
# full load
full_event = proxy_event.load(time_slice=None)
assert isinstance(full_event, Event)
assert_same_attributes(proxy_event, full_event, exclude=('times', 'labels'))
assert full_event.shape == proxy_event.shape
# slice time
event = proxy_event.load(time_slice=(1 * pq.s, 2 * pq.s))
assert event.shape == (2,)
assert event.labels.shape == (2,)
assert_same_attributes(proxy_event.time_slice(1 * pq.s, 2 * pq.s), event)
# buggy time slice
with self.assertRaises(AssertionError):
event = proxy_event.load(time_slice=(2 * pq.s, 15 * pq.s))
event = proxy_event.load(time_slice=(2 * pq.s, 15 * pq.s), strict_slicing=False)
class TestEpochProxy(BaseProxyTest):
def test_EpochProxy(self):
proxy_epoch = EpochProxy(rawio=self.reader, event_channel_index=1,
block_index=0, seg_index=0)
assert proxy_epoch.name == 'Some epochs'
assert proxy_epoch.shape == (10,)
# full load
full_epoch = proxy_epoch.load(time_slice=None)
assert isinstance(full_epoch, Epoch)
assert_same_attributes(proxy_epoch, full_epoch, exclude=('times', 'labels', 'durations'))
assert full_epoch.shape == proxy_epoch.shape
# slice time
epoch = proxy_epoch.load(time_slice=(1 * pq.s, 4 * pq.s))
assert epoch.shape == (3,)
assert epoch.labels.shape == (3,)
assert epoch.durations.shape == (3,)
assert_same_attributes(proxy_epoch.time_slice(1 * pq.s, 4 * pq.s), epoch)
# buggy time slice
with self.assertRaises(AssertionError):
epoch = proxy_epoch.load(time_slice=(2 * pq.s, 15 * pq.s))
epoch = proxy_epoch.load(time_slice=(2 * pq.s, 15 * pq.s), strict_slicing=False)
class TestSegmentWithProxy(BaseProxyTest):
def test_segment_with_proxy(self):
seg = Segment()
proxy_anasig = AnalogSignalProxy(rawio=self.reader,
global_channel_indexes=None,
block_index=0, seg_index=0,)
seg.analogsignals.append(proxy_anasig)
proxy_sptr = SpikeTrainProxy(rawio=self.reader, unit_index=0,
block_index=0, seg_index=0)
seg.spiketrains.append(proxy_sptr)
proxy_event = EventProxy(rawio=self.reader, event_channel_index=0,
block_index=0, seg_index=0)
seg.events.append(proxy_event)
proxy_epoch = EpochProxy(rawio=self.reader, event_channel_index=1,
block_index=0, seg_index=0)
seg.epochs.append(proxy_epoch)
if __name__ == "__main__":
unittest.main()
|
|
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import dates
start = datetime(2012,5,20,0,2) #2009 - 20090628 2010 - 20100610 2012 - 20120405
end = datetime(2012,6,1) #2009 - 20090816 2010 - 20100726 2012 - 20120601
timestep = 1./30 #hours
sample_min = 117 #117 for all 2009-2012
sample_max = 123 #123 for all 2009-2012
yag_min = 3.8 #3.8 for all 2009-2012
yag_max = 6 #6 for all 2009-2012
BC_VED_min = 70
BC_VED_max = 400
min_scat_pkht = 20
mass_min = ((BC_VED_min/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
mass_max = ((BC_VED_max/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
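#The two limits above convert a volume-equivalent diameter (VED, in nm) to an rBC
#mass in fg: mass = (pi/6)*(VED in cm)**3 * density * 10**15, where 10**7 converts
#nm to cm and the void-free rBC density is taken as 1.8 g/cm3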
print 'mass limits', mass_min, mass_max
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def check_spike_times(particle_start_time,particle_end_time):
cursor.execute('''SELECT count(*)
FROM whi_spike_times_2009to2012
WHERE (spike_start_UTC <= %s AND spike_end_UTC > %s)
OR (spike_start_UTC <= %s AND spike_end_UTC > %s)
''',
(particle_start_time,particle_start_time,particle_end_time,particle_end_time))
spike_count = cursor.fetchall()[0][0]
return spike_count
def get_hysplit_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_hysplit_hourly_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
hy_id_list = cursor.fetchall()
if hy_id_list == []:
hy_id = None
else:
hy_id = hy_id_list[0][0]
return hy_id
def get_met_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_sampling_conditions
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
met_id_list = cursor.fetchall()
if met_id_list == []:
met_id = None
else:
met_id = met_id_list[0][0]
return met_id
def get_gc_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_gc_hourly_bc_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
gc_id_list = cursor.fetchall()
if gc_id_list == []:
gc_id = None
else:
gc_id = gc_id_list[0][0]
return gc_id
def get_sample_factor(UNIX_start):
date_time = datetime.utcfromtimestamp(UNIX_start)
sample_factors_2012 = [
[datetime(2012,4,4,19,43,4), datetime(2012,4,5,13,47,9), 3.0],
[datetime(2012,4,5,13,47,9), datetime(2012,4,10,3,3,25), 1.0],
[datetime(2012,4,10,3,3,25), datetime(2012,5,16,6,9,13), 3.0],
[datetime(2012,5,16,6,9,13), datetime(2012,6,7,18,14,39), 10.0],
]
if date_time.year in [2009,2010]:
sample_factor = 1.0
if date_time.year == 2012:
for date_range in sample_factors_2012:
start_date = date_range[0]
end_date = date_range[1]
range_sample_factor = date_range[2]
if start_date<= date_time < end_date:
sample_factor = range_sample_factor
return sample_factor
#query to add 1h mass conc data
add_data = ('''INSERT INTO whi_sp2_hourly_data
(UNIX_UTC_start_time,UNIX_UTC_end_time,number_particles,rBC_mass,rBC_mass_err,volume_air_sampled,mean_lag_time,sample_factor,hysplit_hourly_id,whi_sampling_cond_id,gc_hourly_id)
VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(number_particles)s,%(rBC_mass)s,%(rBC_mass_err)s,%(volume_air_sampled)s,%(mean_lag_time)s,%(sample_factor)s,%(hysplit_hourly_id)s,%(whi_sampling_cond_id)s,%(gc_hourly_id)s)'''
)
#
multiple_records = []
i=1
while start <= end:
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = UNIX_start + timestep*3600.0
print start, UNIX_start+60
print datetime.utcfromtimestamp(UNIX_end)
st = datetime.now()
#filter on hk data here
cursor.execute('''(SELECT
mn.UNIX_UTC_ts_int_start,
mn.UNIX_UTC_ts_int_end,
mn.rBC_mass_fg_BBHG,
mn.rBC_mass_fg_BBHG_err,
mn.BB_incand_pk_pos,
mn.BB_scat_pk_pos,
mn.BB_scat_pkht,
hk.sample_flow,
mn.BB_incand_HG
FROM whi_sp2_particle_data mn
FORCE INDEX (hourly_binning)
JOIN whi_hk_data hk on mn.HK_id = hk.id
WHERE
mn.UNIX_UTC_ts_int_start >= %s
AND mn.UNIX_UTC_ts_int_end < %s
AND hk.sample_flow >= %s
AND hk.sample_flow < %s
AND hk.yag_power >= %s
AND hk.yag_power < %s)''',
(UNIX_start,UNIX_end,sample_min,sample_max,yag_min,yag_max))
ind_data = cursor.fetchall()
data={
'rBC_mass_fg':[],
'rBC_mass_fg_err':[],
'lag_time':[]
}
total_sample_vol = 0
flows = []
for row in ind_data:
ind_start_time = float(row[0])
ind_end_time = float(row[1])
bbhg_mass_corr = float(row[2])
bbhg_mass_corr_err = float(row[3])
BB_incand_pk_pos = float(row[4])
BB_scat_pk_pos = float(row[5])
BB_scat_pk_ht = float(row[6])
		sample_flow = row[7] #in vccm; may be NULL in the DB, so convert to float after the None check below
		incand_pkht = float(row[8]) #BB_incand_HG peak height (assumed: the original read row[7] twice, duplicating sample_flow)
#filter spike times here
if check_spike_times(ind_start_time,ind_end_time):
print 'spike'
continue
#skip the long interval
if (ind_end_time - ind_start_time) > 540:
print 'long interval'
continue
		#skip if no sample flow
		if sample_flow is None:
			print 'no flow'
			continue
		sample_flow = float(sample_flow)
flows.append(sample_flow)
sample_vol = (sample_flow*(ind_end_time-ind_start_time)/60) #/60 b/c sccm and time in secs
total_sample_vol = total_sample_vol + sample_vol
#bbhg_mass_corr = 0.24826+ 0.003043*incand_pkht
if (mass_min <= bbhg_mass_corr < mass_max):
data['rBC_mass_fg'].append(bbhg_mass_corr)
data['rBC_mass_fg_err'].append(bbhg_mass_corr_err)
#only calc lag time if there is a scattering signal
if BB_scat_pk_ht > min_scat_pkht:
lag_time = (BB_incand_pk_pos-BB_scat_pk_pos)*0.2 #us
data['lag_time'].append(lag_time)
tot_rBC_mass_fg = sum(data['rBC_mass_fg'])
tot_rBC_mass_uncer = sum(data['rBC_mass_fg_err'])
rBC_number = len(data['rBC_mass_fg'])
mean_lag = float(np.mean(data['lag_time']))
if np.isnan(mean_lag):
mean_lag = None
#get sample factor, hysplit id, met Id, gc_id
sample_factor = get_sample_factor(UNIX_start)
#get hysplit_id
hysplit_id = get_hysplit_id(UNIX_start)
#get sampling conditions id
met_id = get_met_id(UNIX_start)
cursor.execute('''(SELECT
pressure_Pa,
room_temp_C
FROM whi_sampling_conditions
WHERE
id = %s
AND id > %s)''',
(met_id,0))
	met_data = cursor.fetchall()
	pressure = met_data[0][0] #Pa (assumes a matching sampling-conditions row exists)
	temperature = met_data[0][1]+273.15 #convert deg C to K
	#get gc_id
gc_id = get_gc_id(UNIX_start)
	####
	R = 8.3144621 # in m3*Pa/(K*mol)
	#correction factor to convert the mass concentration from ambient conditions to STP (273 K, 101325 Pa)
	scorr = (101325*temperature)/(273*pressure)
	volume_ambient = (R*temperature)/(pressure)
	volume_STP = volume_ambient*(pressure/101325)*(273/temperature)
	correction_factor_for_STP = volume_ambient/volume_STP #algebraically identical to scorr above
mconc = (tot_rBC_mass_fg/total_sample_vol)*correction_factor_for_STP
print 'mass-conc', mconc*sample_factor
print tot_rBC_mass_fg,total_sample_vol,total_sample_vol/2
#add to db
single_record = {
'UNIX_UTC_start_time' :UNIX_start,
'UNIX_UTC_end_time' :UNIX_end,
'number_particles' :rBC_number,
'rBC_mass' :tot_rBC_mass_fg,
'rBC_mass_err' :tot_rBC_mass_uncer,
'volume_air_sampled' :total_sample_vol,
'mean_lag_time' :mean_lag,
'sample_factor' :sample_factor,
'hysplit_hourly_id' :hysplit_id,
'whi_sampling_cond_id' :met_id,
'gc_hourly_id' :gc_id,
}
multiple_records.append((single_record))
#bulk insert to db table
if i%24 == 0:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
#increment count
i+= 1
start += timedelta(hours = timestep)
#bulk insert of remaining records to db
if multiple_records:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
cnx.close()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._gallery_images_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_gallery_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryImagesOperations:
"""GalleryImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs: Any
) -> "_models.GalleryImage":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_image, 'GalleryImage')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryImage"]:
"""Create or update a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be created.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be created or updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the create or update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2021_10_01.models.GalleryImage
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryImage or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImageUpdate",
**kwargs: Any
) -> "_models.GalleryImage":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_image, 'GalleryImageUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImageUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryImage"]:
"""Update a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be updated.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be updated. The allowed
characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The
maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2021_10_01.models.GalleryImageUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryImage or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> "_models.GalleryImage":
"""Retrieves information about a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are
to be retrieved.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be retrieved.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_10_01.models.GalleryImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a gallery image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be deleted.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be deleted.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> AsyncIterable["_models.GalleryImageList"]:
"""List gallery image definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to
be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryImageList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_10_01.models.GalleryImageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=self.list_by_gallery.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("GalleryImageList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images'} # type: ignore
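# Usage sketch (illustrative only, not part of the generated client): the
# credential, subscription id and resource names below are assumptions.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.compute.aio import ComputeManagementClient
#
#   async def create_gallery_image(gallery_image):
#       client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#       poller = await client.gallery_images.begin_create_or_update(
#           resource_group_name="my-rg",
#           gallery_name="my-gallery",
#           gallery_image_name="my-image",
#           gallery_image=gallery_image,  # a models.GalleryImage instance
#       )
#       image = await poller.result()  # blocks until the long-running operation completes
#       async for img in client.gallery_images.list_by_gallery("my-rg", "my-gallery"):
#           print(img.name)  # AsyncItemPaged yields GalleryImage items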
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP
description:
- Manage the hostname of a BIG-IP device.
version_added: "1.0.0"
options:
hostname:
description:
- Hostname of the BIG-IP host.
type: str
required: True
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Matthew Lam (@mryanlam)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: bigip.localhost.localdomain
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
hostname:
description: The new hostname of the device.
returned: changed
type: str
sample: big-ip01.internal
'''
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_attributes = ['hostname']
updatables = ['hostname']
returnables = ['hostname']
    def to_return(self):
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        return self._filter_params(result)
@property
def hostname(self):
if self._values['hostname'] is None:
return None
return str(self._values['hostname'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
pass
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = ApiParameters()
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
result = dict()
changed = self.update()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _read_global_settings_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/global-settings/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return response
raise F5ModuleError(resp.content)
def read_current_from_device(self):
result = self._read_global_settings_from_device()
uri = "https://{0}:{1}/mgmt/tm/cm/device/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
self_device = next((x['name'] for x in response['items'] if x['selfDevice'] == "true"), None)
result['self_device'] = self_device
return ApiParameters(params=result)
raise F5ModuleError(resp.content)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/global-settings/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
raise F5ModuleError(resp.content)
if self.have.self_device:
uri = "https://{0}:{1}/mgmt/tm/cm/device".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='mv',
name=self.have.self_device,
target=self.want.hostname
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
raise F5ModuleError(resp.content)
return True
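    # Note: changing the hostname is a two-step operation on BIG-IP. The method
    # above first PATCHes sys/global-settings with the new hostname, then issues
    # a "mv" command against the cm device object representing this unit (the
    # "self device") so the device trust/cluster metadata keeps a consistent name.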
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
hostname=dict(
required=True
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
|
from __future__ import with_statement
import os
import time
import logging
from concurrence import unittest, Tasklet, Channel, Timeout
from concurrence.memcache import MemcacheResult, Memcache, MemcacheProtocol, MemcacheConnection, MemcacheConnectionManager, MemcacheError, MemcacheBehaviour, MemcacheCodec
MEMCACHE_IP = '127.0.0.1'
MEMCACHED_PATHS = ['/opt/memcached/bin/memcached',
'/usr/bin/memcached',
'/opt/local/bin/memcached']
MEMCACHED_BIN = None
for path in MEMCACHED_PATHS:
if os.path.exists(path):
MEMCACHED_BIN = path
break
assert MEMCACHED_BIN is not None, "could not find memcached daemon binary"
#TODO check for memcached version before testing cas/gets commands
TEST_EXT = True
class TestMemcache(unittest.TestCase):
log = logging.getLogger("TestMemcache")
def setUp(self):
self.log.debug("using memcached daemon: %s", MEMCACHED_BIN)
for i in range(4):
cmd = '%s -m 10 -p %d -u nobody -l 127.0.0.1&' % (MEMCACHED_BIN, 11211 + i)
self.log.debug(cmd)
os.system(cmd)
Tasklet.sleep(1.0) #should be enough for memcached to get up and running
def tearDown(self):
MemcacheConnectionManager.create("default").close_all()
cmd = 'killall %s' % MEMCACHED_BIN
self.log.debug(cmd)
os.system(cmd)
Tasklet.sleep(1.0) #should be enough for memcached to go down
def testResultCode(self):
self.assertTrue(MemcacheResult.get('STORED') == MemcacheResult.get('STORED'))
self.assertEquals('blaataap', MemcacheResult.get('CLIENT_ERROR blaataap').msg)
self.assertEquals('blaataap', MemcacheResult.get('SERVER_ERROR blaataap').msg)
self.assertTrue(MemcacheResult.get('CLIENT_ERROR blaataap') == MemcacheResult.get('CLIENT_ERROR blaataap'))
self.assertEquals("MemcacheResult.STORED", repr(MemcacheResult.STORED))
try:
MemcacheResult.get('XXX')
self.fail()
except MemcacheError:
pass
def testModuloBehaviour(self):
try:
MemcacheBehaviour.create("blaataap")
self.fail("expected error")
except MemcacheError:
pass
b = MemcacheBehaviour.create("modulo")
self.assertTrue(b is MemcacheBehaviour.create(b))
b.set_servers([1,2,3,4])
s = set()
for i in range(100):
s.add(b.key_to_addr(i))
self.assertEquals(set([1,2,3,4]), s)
def sharedTestBasic(self, mc):
self.assertEquals(MemcacheResult.STORED, mc.set('test1', '12345'))
self.assertEquals(MemcacheResult.STORED, mc.set('test2', '67890'))
self.assertEquals('12345', mc.get('test1'))
self.assertEquals('67890', mc.get('test2'))
#__setitem__
mc['test1_gsi'] = '12345'
mc['test2_gsi'] = '67890'
#__getitem__
self.assertEquals('12345', mc['test1_gsi'])
self.assertEquals('67890', mc['test2_gsi'])
#get with result:
self.assertEquals((MemcacheResult.OK, '12345'), mc.getr('test1'))
self.assertEquals((MemcacheResult.OK, '67890'), mc.getr('test2'))
        self.assertEquals(None, mc.get('test3')) #if not found, returns None by default
        self.assertEquals('blaat', mc.get('test3', 'blaat')) #but you can supply another default
self.assertEquals((MemcacheResult.OK, {'test1': '12345', 'test2': '67890'}), mc.get_multi(['test1', 'test2', 'test3']))
#update test2
mc.set('test2', 'hello world!')
self.assertEquals((MemcacheResult.OK, {'test1': '12345', 'test2': 'hello world!'}), mc.get_multi(['test1', 'test2', 'test3']))
#update to int type
mc.set('test2', 10)
self.assertEquals(10, mc.get('test2'))
self.assertEquals(int, type(mc.get('test2')))
#update to long type
mc.set('test2', 10L)
self.assertEquals(10L, mc.get('test2'))
self.assertEquals(long, type(mc.get('test2')))
#update to string type
mc.set('test2', 'blaat')
self.assertEquals('blaat', mc.get('test2'))
self.assertEquals(str, type(mc.get('test2')))
#update to unicode type
mc.set('test2', u'C\xe9line')
self.assertEquals(u'C\xe9line', mc.get('test2'))
self.assertEquals(unicode, type(mc.get('test2')))
#update to some other type
mc.set('test2', {'piet': 'blaat', 10: 20})
self.assertEquals({'piet': 'blaat', 10: 20}, mc.get('test2'))
#test delete
self.assertEquals(MemcacheResult.NOT_FOUND, mc.delete('test_del1'))
self.assertEquals(MemcacheResult.STORED, mc.set('test_del1', 'hello'))
self.assertEquals('hello', mc.get('test_del1'))
self.assertEquals(MemcacheResult.DELETED, mc.delete('test_del1'))
self.assertEquals(None, mc.get('test_del1'))
#test add command
mc.delete('add1')
self.assertEquals(MemcacheResult.STORED, mc.add('add1', '11111'))
self.assertEquals(MemcacheResult.NOT_STORED, mc.add('add1', '22222'))
#test replace
self.assertEquals(MemcacheResult.STORED, mc.set('replace1', '11111'))
self.assertEquals(MemcacheResult.STORED, mc.replace('replace1', '11111'))
self.assertEquals(MemcacheResult.STORED, mc.replace('replace1', '11111'))
self.assertEquals(MemcacheResult.DELETED, mc.delete('replace1'))
self.assertEquals(MemcacheResult.NOT_STORED, mc.replace('replace1', '11111'))
#test expiration
self.assertEquals(MemcacheResult.STORED, mc.set('exp_test', 'blaat', 2)) #expire 2 seconds from now
self.assertEquals('blaat', mc.get('exp_test')) #should still find it
Tasklet.sleep(4)
self.assertEquals(None, mc.get('exp_test')) #should be gone
if TEST_EXT:
#test cas/gets
self.assertEquals(MemcacheResult.NOT_FOUND, mc.cas('cas_test1', 'blaat', 12345))
self.assertEquals(MemcacheResult.STORED, mc.set('cas_test1', 'blaat'))
result, value, cas_unique = mc.gets('cas_test1')
self.assertEquals(MemcacheResult.OK, result)
self.assertEquals('blaat', value)
self.assertEquals(MemcacheResult.STORED, mc.cas('cas_test1', 'blaat2', cas_unique))
self.assertEquals(MemcacheResult.EXISTS, mc.cas('cas_test1', 'blaat2', cas_unique))
result, value, cas_unique = mc.gets('cas_test1_not_there')
self.assertEquals(MemcacheResult.OK, result)
self.assertEquals(None, value)
self.assertEquals(None, cas_unique)
result, value, cas_unique = mc.gets('cas_test1')
self.assertEquals(MemcacheResult.OK, result)
self.assertEquals('blaat2', value)
self.assertEquals(MemcacheResult.STORED, mc.cas('cas_test1', 'blaat3', cas_unique))
self.assertEquals(MemcacheResult.STORED, mc.set('cas_test2', 'blaat4'))
result, values = mc.gets_multi(['cas_test1', 'cas_test2'])
self.assertEquals(MemcacheResult.OK, result)
self.assertTrue('cas_test1' in values)
self.assertTrue('cas_test2' in values)
self.assertEquals('blaat3', values['cas_test1'][0])
self.assertEquals('blaat4', values['cas_test2'][0])
#test append
self.assertEquals(MemcacheResult.NOT_STORED, mc.append('append_test1', 'hello'))
self.assertEquals(MemcacheResult.STORED, mc.set('append_test1', 'hello'))
self.assertEquals(MemcacheResult.STORED, mc.append('append_test1', 'world'))
self.assertEquals('helloworld', mc.get('append_test1'))
#test prepend
self.assertEquals(MemcacheResult.NOT_STORED, mc.prepend('prepend_test1', 'world'))
self.assertEquals(MemcacheResult.STORED, mc.set('prepend_test1', 'world'))
self.assertEquals(MemcacheResult.STORED, mc.prepend('prepend_test1', 'hello'))
self.assertEquals('helloworld', mc.get('prepend_test1'))
#test incr
self.assertEquals((MemcacheResult.NOT_FOUND, None), mc.incr('incr_test1', 1)) #not found
self.assertEquals(MemcacheResult.STORED, mc.set('incr_test1', '0'))
self.assertEquals((MemcacheResult.OK, 1), mc.incr('incr_test1', 1))
self.assertEquals((MemcacheResult.OK, 2), mc.incr('incr_test1', '1'))
self.assertEquals((MemcacheResult.OK, 12), mc.incr('incr_test1', 10))
self.assertEquals(MemcacheResult.STORED, mc.set('incr_test1', '18446744073709551615'))
self.assertEquals((MemcacheResult.OK, 0), mc.incr('incr_test1', 1))
#test decr
self.assertEquals((MemcacheResult.NOT_FOUND, None), mc.decr('decr_test1', 1)) #not found
self.assertEquals(MemcacheResult.STORED, mc.set('decr_test1', '12'))
self.assertEquals((MemcacheResult.OK, 11), mc.decr('decr_test1', 1))
self.assertEquals((MemcacheResult.OK, 10), mc.decr('decr_test1', '1'))
self.assertEquals((MemcacheResult.OK, 0), mc.decr('decr_test1', 10))
self.assertEquals((MemcacheResult.OK, 0), mc.decr('decr_test1', 1))
def testBasicSingle(self):
mc = MemcacheConnection((MEMCACHE_IP, 11211))
self.sharedTestBasic(mc)
def testSingleBatch(self):
mc = MemcacheConnection((MEMCACHE_IP, 11211))
#batch = mc.batch()
#batch.set('bset_1', 10)
#batch.set('bset_2', 20)
#batch.get('bset_1')
#batch.get('bset_2')
#result = batch.execute()
#print result.receive()
#print result.receive()
#print result.receive()
#print result.receive()
N = 400
B = 400
with unittest.timer() as tmr:
for i in range(N):
batch = mc.batch()
for j in range(B):
batch.set('test2', 'hello world!')
for _ in batch.execute(): #TODO why do we need to read out results?
pass
print 'single server batch set keys/sec', tmr.sec(B * N)
def testExtraSingle(self):
"""test stuff that only makes sense on a single server connection"""
mc = MemcacheConnection((MEMCACHE_IP, 11211))
res1, v1 = mc.version()
res2, v2 = mc.version()
self.assertEquals(MemcacheResult.OK, res1)
self.assertEquals(MemcacheResult.OK, res2)
self.assertEquals(str, type(v1))
self.assertEquals(str, type(v2))
self.assertTrue(len(v1) > 1)
self.assertEquals(v1, v2)
def testBasic(self):
mc = Memcache()
mc.set_servers([((MEMCACHE_IP, 11211), 100)])
self.sharedTestBasic(mc)
def testMemcache(self):
mc = Memcache()
mc.set_servers([((MEMCACHE_IP, 11211), 100)])
N = 10000
with unittest.timer() as tmr:
for i in range(N):
mc.set('test2', 'hello world!')
print 'single server single set keys/sec', tmr.sec(N)
def testTimeout(self):
mc = Memcache()
mc.set_servers([((MEMCACHE_IP, 11211), 100)])
def callback(socket, count, event, args, kwargs):
print count, event, Tasklet.current()
if (count, event) == (1, "write"):
pass
elif (count, event) == (2, "read"):
Tasklet.sleep(1.0)
return "OK\r\n"
unittest.TestSocket.install((MEMCACHE_IP, 11211), callback)
with Timeout.push(0.5):
self.assertEquals(MemcacheResult.TIMEOUT, mc.set('blaat', 'aap'))
print 'done (timeout)'
Tasklet.sleep(4.0)
def testMemcacheMultiServer(self):
mc = Memcache()
mc.set_servers([((MEMCACHE_IP, 11211), 100),
((MEMCACHE_IP, 11212), 100),
((MEMCACHE_IP, 11213), 100),
((MEMCACHE_IP, 11214), 100)])
N = 10000
keys = ['test%d' % i for i in range(N)]
with unittest.timer() as tmr:
for i in range(N):
self.assertEquals(MemcacheResult.STORED, mc.set(keys[i], 'hello world %d' % i))
print 'multi server single set keys/sec', tmr.sec(N)
with unittest.timer() as tmr:
for i in range(N):
self.assertEquals('hello world %d' % i, mc.get(keys[i]))
print 'multi server single get keys/sec', tmr.sec(N)
N = 10000
for stride in [10,20,40]:
with unittest.timer() as tmr:
for i in range(0, N, stride):
result, values = mc.get_multi(keys[i:i+stride])
self.assertEquals(MemcacheResult.OK, result)
self.assertEquals(stride, len(values))
print 'multi server multi get (%d) keys/sec' % stride, tmr.sec(N)
def testMultiClientMultiServer(self):
N = 40 * 100
keys = ['test%d' % i for i in range(N)]
mc = Memcache()
mc.set_servers([((MEMCACHE_IP, 11211), 100),
((MEMCACHE_IP, 11212), 100),
((MEMCACHE_IP, 11213), 100),
((MEMCACHE_IP, 11214), 100)])
with unittest.timer() as tmr:
for i in range(N):
self.assertEquals(MemcacheResult.STORED, mc.set(keys[i], 'hello world %d' % i))
print 'single client multi server single set keys/sec', tmr.sec(N)
stride = 40
def fetcher():
for i in range(0, N, stride):
result, values = mc.get_multi(keys[i:i+stride])
self.assertEquals(MemcacheResult.OK, result)
self.assertEquals(stride, len(values))
for nr_clients in [2,4,8,16]:#,32,64,128]:
with unittest.timer() as tmr:
for i in range(nr_clients):
Tasklet.new(fetcher)()
Tasklet.join_children()
print 'multi client (%d), multi server multi get (%d) keys/sec' % (nr_clients, stride), tmr.sec(N * nr_clients)
def testTextProtocol(self):
from concurrence.io import Socket, BufferedStream
from concurrence.memcache.protocol import MemcacheProtocol
socket = Socket.connect((MEMCACHE_IP, 11211))
stream = BufferedStream(socket)
writer = stream.writer
reader = stream.reader
try:
protocol = MemcacheProtocol.create("textblaat")
self.fail("expected error")
except MemcacheError:
pass
protocol = MemcacheProtocol.create("text")
self.assertTrue(protocol is MemcacheProtocol.create(protocol))
protocol.set_codec("raw")
protocol.write_set(writer, 'hello', 'world', 0, 0)
writer.flush()
self.assertEquals((MemcacheResult.STORED, None), protocol.read_set(reader))
N = 100
for i in range(N):
protocol.write_set(writer, 'test%d' % i, 'hello world %d' % i, 0, 0)
writer.flush()
self.assertEquals((MemcacheResult.STORED, None), protocol.read_set(reader))
#single get
for i in range(N):
protocol.write_get(writer, ['test%d' % i])
writer.flush()
result = protocol.read_get(reader)
self.assertEquals((MemcacheResult.OK, {'test%d' % i: 'hello world %d' % i}), result)
#multi get
for i in range(0, N, 10):
keys = ['test%d' % x for x in range(i, i + 10)]
protocol.write_get(writer, keys)
writer.flush()
result, values = protocol.read_get(reader)
self.assertEquals(MemcacheResult.OK, result)
self.assertEquals(10, len(values))
#multi get pipeline, e.g. write N gets, but don't read out the results yet
for i in range(0, N, 10):
keys = ['test%d' % x for x in range(i, i + 10)]
protocol.write_get(writer, keys)
writer.flush()
#now read the results
for i in range(0, N, 10):
result, values = protocol.read_get(reader)
self.assertEquals(10, len(values))
self.assertTrue(('test%d' % i) in values)
#pipelined multiget with same set of keys
protocol.write_get(writer, ['test2', 'test8', 'test9', 'test11', 'test23', 'test24', 'test29', 'test31', 'test34'])
writer.flush()
protocol.write_get(writer, ['test2', 'test8', 'test9', 'test11', 'test23', 'test24', 'test29', 'test31', 'test34'])
writer.flush()
result1 = protocol.read_get(reader)
result2 = protocol.read_get(reader)
self.assertEquals(result1, result2)
def testCodec(self):
try:
MemcacheCodec.create("bla")
self.fail("expected error")
except MemcacheError:
pass
codec = MemcacheCodec.create("raw")
self.assertEquals(('bla', 10), codec.encode('bla', 10))
self.assertEquals('bla', codec.decode(10, 'bla'))
def testConnectionManager(self):
try:
cm = MemcacheConnectionManager()
protocol = MemcacheProtocol.create("text")
connections = []
def connector():
connections.append(cm.get_connection((MEMCACHE_IP, 11211), protocol))
Tasklet.new(connector)()
Tasklet.new(connector)()
Tasklet.new(connector)()
Tasklet.join_children()
Tasklet.new(connector)()
Tasklet.join_children()
self.assertEquals(4, len(connections))
self.assertEquals(1, len(cm._connections))
finally:
cm.close_all()
#imported at module level so unittest.main() below also discovers and runs the ketama tests
from concurrence.memcache.ketama import TestKetama
if __name__ == '__main__':
unittest.main(timeout = 100)
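# Quick-start sketch for the client exercised by these tests (the server
# address is an assumption; any reachable memcached works):
#
#   mc = MemcacheConnection(('127.0.0.1', 11211))
#   mc.set('greeting', 'hello')
#   assert mc.get('greeting') == 'hello'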
|
|
#!/usr/bin/env python
################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015 Gerley Machado
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################################
import argparse
import csv
import os
import random
import sys
import time
from xml.dom import minidom
VERSION = "0.1.0"
class ReaderConfig:
def __init__(self, inputFile):
self.inputFile = inputFile
def execute(self):
tableList = []
xmldoc = minidom.parse(self.inputFile)
for tableDoc in xmldoc.getElementsByTagName('table'):
tableList.append(self.getTable(tableDoc))
return tableList
def getTable(self, tableDoc):
name = tableDoc.attributes['name'].value
lenRecords = int(tableDoc.attributes['records'].value)
fieldList = []
for fieldDoc in tableDoc.getElementsByTagName('field'):
fieldList.append(self.getField(fieldDoc))
return {'name': name, 'lenRecords': lenRecords, 'fields': fieldList}
def getField(self, fieldDoc):
if fieldDoc.attributes['type'].value == 'auto_increment':
return self.getAutoIncrementField(fieldDoc)
elif fieldDoc.attributes['type'].value in ('integer', 'float'):
return self.getNumericField(fieldDoc)
elif fieldDoc.attributes['type'].value == 'varchar':
return self.getVarCharField(fieldDoc)
elif fieldDoc.attributes['type'].value == 'bool':
return self.getBoolField(fieldDoc)
elif fieldDoc.attributes['type'].value == 'fixed':
return self.getFixedField(fieldDoc)
elif fieldDoc.attributes['type'].value == 'date':
return self.getDateField(fieldDoc)
elif fieldDoc.attributes['type'].value == 'select':
return self.getSelectField(fieldDoc)
        raise Exception('Invalid field type: ' + fieldDoc.attributes['type'].value)
def getAutoIncrementField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
mininumValue = int(fieldDoc.attributes['min_val'].value)
return {'name': name, 'mininumValue': mininumValue, 'type': 'auto_increment'}
def getNumericField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
type = fieldDoc.attributes['type'].value
mininumValue = int(fieldDoc.attributes['min_val'].value)
maximunValue = int(fieldDoc.attributes['max_val'].value)
return {'name': name, 'type': type, 'mininumValue': mininumValue, 'maximunValue': maximunValue}
def getVarCharField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
mininumLen = int(fieldDoc.attributes['min_len'].value)
maximunLen = int(fieldDoc.attributes['max_len'].value)
return {'name': name, 'mininumLen': mininumLen, 'maximunLen': maximunLen, 'type': 'varchar'}
def getBoolField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
return {'name': name, 'type': 'bool'}
def getFixedField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
typeValue = fieldDoc.attributes['type_value'].value
value = fieldDoc.attributes['value'].value
return {'name': name, 'typeValue': typeValue, 'value': value, 'type': 'fixed'}
def getDateField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
format = fieldDoc.attributes['format'].value
mininumValue = fieldDoc.attributes['min_val'].value
maximunValue = fieldDoc.attributes['max_val'].value
return {'name': name, 'type': 'date', 'mininumValue': mininumValue, 'maximunValue': maximunValue, 'format': format}
def getSelectField(self, fieldDoc):
name = fieldDoc.attributes['name'].value
typeValue = fieldDoc.attributes['type_value'].value
options = map(lambda x: x.attributes['value'].value, fieldDoc.getElementsByTagName('option'))
return {'name': name, 'typeValue': typeValue, 'options': options, 'type': 'select'}
class Generater:
def __init__(self, tableList, outputPath):
self.tableList = tableList
self.outputPath = outputPath
def execute(self):
self.generaterFieldValue = GeneraterFieldValue()
for table in self.tableList:
self.generateFile(table)
def generateFile(self, table):
generaterCSV = GeneraterCSV(table, self.outputPath)
for recordIndex in xrange(table['lenRecords']):
generaterCSV.append(self.generateRegister(table, recordIndex))
def generateRegister(self, table, recordIndex):
register = []
for field in table['fields']:
register.append(self.generaterFieldValue.getValue(field, recordIndex))
return register
class GeneraterFieldValue:
def getValue(self, field, recordIndex):
if field['type'] == 'auto_increment':
return self.getAutoIncrementValue(field, recordIndex)
elif field['type'] == 'integer':
return self.getIntegerValue(field)
elif field['type'] == 'float':
return self.getFloatValue(field)
elif field['type'] == 'varchar':
return self.getVarcharValue(field)
elif field['type'] == 'bool':
return self.getBoolValue()
elif field['type'] == 'fixed':
return self.getFixedValue(field)
elif field['type'] == 'date':
return self.getDateValue(field)
elif field['type'] == 'select':
return self.getSelectValue(field)
        raise Exception('Invalid field type: ' + field['type'])
def getAutoIncrementValue(self, field, recordIndex):
return recordIndex + field['mininumValue']
def getIntegerValue(self, field):
return random.randint(field['mininumValue'], field['maximunValue'])
def getFloatValue(self, field):
return random.uniform(field['mininumValue'], field['maximunValue'])
def getVarcharValue(self, field):
length = self.getIntegerValue({'mininumValue': field['mininumLen'], 'maximunValue': field['maximunLen']})
listChar = [random.choice('ABCDEFGHIJKLMNOPQRSTUVXZabcdefghijklmnopqrstuvxz') for i in xrange(length)]
return ''.join(listChar)
def getBoolValue(self):
return random.choice([True, False])
def getFixedValue(self, field):
if(field['typeValue'] == 'varchar'):
return field['value']
return int(field['value'])
def getDateValue(self, field):
prop = random.random()
stime = time.mktime(time.strptime(field['mininumValue'], field['format']))
etime = time.mktime(time.strptime(field['maximunValue'], field['format']))
ptime = stime + prop * (etime - stime)
return time.strftime(field['format'], time.localtime(ptime))
def getSelectValue(self, field):
return random.choice(field['options'])
class GeneraterCSV:
def __init__(self, table, outputPath):
self.table = table
self.outputPath = outputPath
pathFile = os.path.join(outputPath, table['name'] + '.csv')
        self._fp = open(pathFile, 'wb')  # binary mode, as the csv module requires on Python 2
        self.file = csv.writer(self._fp)
listField = map(lambda x: x['name'], table['fields'])
self.file.writerow(listField)
def append(self, register):
self.file.writerow(register)
def main(inputfile, outputpath):
tableList = ReaderConfig(inputfile).execute()
Generater(tableList, outputpath).execute()
if __name__ == "__main__":
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("-i", "--ifile", dest="inputfile", default="config.xml", help="input config file")
parser.add_argument("-o", "--opath", dest="outputpath", required=True, help="output path folder")
parser.add_argument('-v', "--version", action="version", version="%(prog)s {0}".format(VERSION))
options = parser.parse_args()
main(options.inputfile, options.outputpath)
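# Illustrative config.xml (inferred from the attribute names ReaderConfig reads
# above; this exact file is an assumption, not shipped with the tool):
#
# <tables>
#   <table name="users" records="100">
#     <field name="id" type="auto_increment" min_val="1"/>
#     <field name="age" type="integer" min_val="18" max_val="90"/>
#     <field name="login" type="varchar" min_len="5" max_len="12"/>
#     <field name="active" type="bool"/>
#     <field name="created" type="date" format="%Y-%m-%d" min_val="2010-01-01" max_val="2015-12-31"/>
#     <field name="plan" type="select" type_value="varchar">
#       <option value="free"/>
#       <option value="pro"/>
#     </field>
#   </table>
# </tables>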
|
|
# Copyright 2022 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of decoder layers."""
from ddsp import core
from ddsp.training import nn
import gin
import tensorflow as tf
tfkl = tf.keras.layers
# ------------------ Decoders --------------------------------------------------
@gin.register
class RnnFcDecoder(nn.DictLayer):
"""RNN and FC stacks for f0 and loudness."""
def __init__(self,
rnn_channels=512,
rnn_type='gru',
ch=512,
layers_per_stack=3,
stateless=False,
input_keys=('ld_scaled', 'f0_scaled', 'z'),
output_splits=(('amps', 1), ('harmonic_distribution', 40)),
**kwargs):
"""Constructor.
Args:
rnn_channels: Dims for the RNN layer.
rnn_type: Either 'gru' or 'lstm'.
ch: Dims of the fully connected layers.
layers_per_stack: Fully connected layers per a stack.
stateless: Change api to explicitly pass in and out RNN state. Needed for
SavedModel/TFLite inference. Uses nn.StatelessRnn.
input_keys: Create a fully connected stack for each input.
output_splits: Splits the outputs into these dimensions.
**kwargs: Keras-specific kwargs.
Returns:
Dictionary with keys from output_splits. Also has 'state' key if
`stateless=True`, for manually handling state.
"""
# Manually handle state if stateless.
self.stateless = stateless
# Always put state as the last input and output.
self.output_splits = output_splits
output_keys = [v[0] for v in output_splits]
if self.stateless:
input_keys = list(input_keys) + ['state']
output_keys = list(output_keys) + ['state']
super().__init__(input_keys=input_keys, output_keys=output_keys, **kwargs)
# Don't create a stack for manual RNN state.
stack = lambda: nn.FcStack(ch, layers_per_stack)
n_stacks = len(self.input_keys)
if self.stateless:
n_stacks -= 1
rnn_cls = nn.StatelessRnn if stateless else nn.Rnn
# Layers.
self.input_stacks = [stack() for _ in range(n_stacks)]
self.rnn = rnn_cls(rnn_channels, rnn_type)
self.out_stack = stack()
# Copied from OutputSplitsLayer to handle stateless logic.
n_out = sum([v[1] for v in output_splits])
self.dense_out = tfkl.Dense(n_out)
def call(self, *inputs, **unused_kwargs):
# Last input is always carried state for stateless RNN.
inputs = list(inputs)
if self.stateless:
state = inputs.pop()
# Initial processing.
inputs = [stack(x) for stack, x in zip(self.input_stacks, inputs)]
# Run an RNN over the latents.
x = tf.concat(inputs, axis=-1)
if self.stateless:
x, new_state = self.rnn(x, state)
else:
x = self.rnn(x)
x = tf.concat(inputs + [x], axis=-1)
# Final processing.
x = self.out_stack(x)
x = self.dense_out(x)
output_dict = nn.split_to_dict(x, self.output_splits)
if self.stateless:
output_dict['state'] = new_state
return output_dict
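# Minimal usage sketch (key names follow the defaults above; the input tensors
# and the dict-calling convention of nn.DictLayer are assumptions here):
#
#   decoder = RnnFcDecoder(rnn_channels=512, ch=512, layers_per_stack=3)
#   outputs = decoder({'ld_scaled': ld, 'f0_scaled': f0, 'z': z})
#   amps = outputs['amps']                 # [batch, time, 1]
#   hd = outputs['harmonic_distribution']  # [batch, time, 40]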
@gin.register
class MidiDecoder(nn.DictLayer):
"""Decodes MIDI notes (& velocities) to f0 (& loudness)."""
def __init__(self,
net=None,
f0_residual=True,
center_loudness=True,
norm=True,
**kwargs):
"""Constructor."""
super().__init__(**kwargs)
self.net = net
self.f0_residual = f0_residual
self.center_loudness = center_loudness
self.dense_out = tfkl.Dense(2)
self.norm = nn.Normalize('layer') if norm else None
def call(self, z_pitch, z_vel=None, z=None) -> ['f0_midi', 'loudness']:
"""Forward pass for the MIDI decoder.
Args:
z_pitch: Tensor containing encoded pitch in MIDI scale. [batch, time, 1].
z_vel: Tensor containing encoded velocity in MIDI scale. [batch, time, 1].
z: Additional non-MIDI latent tensor. [batch, time, n_z]
Returns:
f0_midi, loudness: Reconstructed f0 and loudness.
"""
# pylint: disable=unused-argument
# x = tf.concat([z_pitch, z_vel], axis=-1) # TODO(jesse): Allow velocity.
x = z_pitch
x = self.net(x) if z is None else self.net([x, z])
if self.norm is not None:
x = self.norm(x)
x = self.dense_out(x)
f0_midi = x[..., 0:1]
loudness = x[..., 1:2]
if self.f0_residual:
f0_midi += z_pitch
if self.center_loudness:
loudness = loudness * 30.0 - 70.0
return f0_midi, loudness
@gin.register
class MidiToHarmonicDecoder(nn.DictLayer):
"""Decodes MIDI notes (& velocities) to f0, amps, hd, noise."""
def __init__(self,
net=None,
f0_residual=True,
norm=True,
output_splits=(('f0_midi', 1),
('amplitudes', 1),
('harmonic_distribution', 60),
('magnitudes', 65)),
midi_zero_silence=True,
**kwargs):
"""Constructor."""
self.output_splits = output_splits
self.n_out = sum([v[1] for v in output_splits])
output_keys = [v[0] for v in output_splits] + ['f0_hz']
super().__init__(output_keys=output_keys, **kwargs)
# Layers.
self.net = net
self.f0_residual = f0_residual
self.dense_out = tfkl.Dense(self.n_out)
self.norm = nn.Normalize('layer') if norm else None
self.midi_zero_silence = midi_zero_silence
def call(self, z_pitch, z_vel=None, z=None):
"""Forward pass for the MIDI decoder.
Args:
z_pitch: Tensor containing encoded pitch in MIDI scale. [batch, time, 1].
z_vel: Tensor containing encoded velocity in MIDI scale. [batch, time, 1].
z: Additional non-MIDI latent tensor. [batch, time, n_z]
Returns:
A dictionary to feed into a processor group.
"""
# pylint: disable=unused-argument
# x = tf.concat([z_pitch, z_vel], axis=-1) # TODO(jesse): Allow velocity.
x = z_pitch
x = self.net(x) if z is None else self.net([x, z])
if self.norm is not None:
x = self.norm(x)
x = self.dense_out(x)
outputs = nn.split_to_dict(x, self.output_splits)
if self.f0_residual:
outputs['f0_midi'] += z_pitch
outputs['f0_hz'] = core.midi_to_hz(outputs['f0_midi'],
midi_zero_silence=self.midi_zero_silence)
return outputs
@gin.register
class DilatedConvDecoder(nn.OutputSplitsLayer):
"""WaveNet style 1-D dilated convolution with optional conditioning."""
def __init__(self,
ch=256,
kernel_size=3,
layers_per_stack=5,
stacks=2,
dilation=2,
norm_type='layer',
resample_stride=1,
stacks_per_resample=1,
resample_after_convolve=True,
input_keys=('ld_scaled', 'f0_scaled'),
output_splits=(('amps', 1), ('harmonic_distribution', 60)),
conditioning_keys=('z',),
precondition_stack=None,
spectral_norm=False,
ortho_init=False,
**kwargs):
"""Constructor, combines input_keys and conditioning_keys."""
self.conditioning_keys = ([] if conditioning_keys is None else
list(conditioning_keys))
input_keys = list(input_keys) + self.conditioning_keys
super().__init__(input_keys, output_splits, **kwargs)
# Conditioning.
self.n_conditioning = len(self.conditioning_keys)
self.conditional = bool(self.conditioning_keys)
if not self.conditional and precondition_stack is not None:
raise ValueError('You must specify conditioning keys if you specify '
'a precondition stack.')
# Layers.
self.precondition_stack = precondition_stack
self.dilated_conv_stack = nn.DilatedConvStack(
ch=ch,
kernel_size=kernel_size,
layers_per_stack=layers_per_stack,
stacks=stacks,
dilation=dilation,
norm_type=norm_type,
resample_type='upsample' if resample_stride > 1 else None,
resample_stride=resample_stride,
stacks_per_resample=stacks_per_resample,
resample_after_convolve=resample_after_convolve,
conditional=self.conditional,
spectral_norm=spectral_norm,
ortho_init=ortho_init)
def _parse_inputs(self, inputs):
"""Split x and z inputs and run preconditioning."""
if self.conditional:
x = tf.concat(inputs[:-self.n_conditioning], axis=-1)
z = tf.concat(inputs[-self.n_conditioning:], axis=-1)
if self.precondition_stack is not None:
z = self.precondition_stack(z)
return [x, z]
else:
return tf.concat(inputs, axis=-1)
def compute_output(self, *inputs):
stack_inputs = self._parse_inputs(inputs)
return self.dilated_conv_stack(stack_inputs)
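if __name__ == '__main__':
  # Minimal shape-check sketch for RnnFcDecoder (a hedged example, not part of the
  # library: the batch/time sizes and the positional-call convention of nn.DictLayer
  # are assumptions rather than facts taken from this file).
  decoder = RnnFcDecoder(
      rnn_channels=32,
      ch=32,
      layers_per_stack=1,
      input_keys=('ld_scaled', 'f0_scaled'),
      output_splits=(('amps', 1), ('harmonic_distribution', 8)))
  ld = tf.zeros([1, 250, 1])  # [batch, time, 1] scaled loudness
  f0 = tf.zeros([1, 250, 1])  # [batch, time, 1] scaled f0
  outputs = decoder(ld, f0)   # expected keys: 'amps' and 'harmonic_distribution'
  print(outputs['amps'].shape)                   # expected: (1, 250, 1)
  print(outputs['harmonic_distribution'].shape)  # expected: (1, 250, 8)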
|
|
# coding=utf-8
__author__ = 'bjorne'
"""An module for analysis on data from Evalans SensiStep.
Calculates the error between the peakvalues in an exerciseresult and the targetvalue.
It produces a result for the chosen exerciseresult and another result with mean errors of several other exerciseresult. """
from AnalysisPackage import analysismodule, moduleUtilities
import time
import numpy as np
import sys
import json
from flask import jsonify
import logging
class peakerror_analysis(analysismodule.AnalysisModule):
def analyse(self, username, configParams, dbHandler, logger):
"""
Calculates the error between the peak loadings and the target loading for the chosen session.
The module then calculates the mean error and compares it to the mean error of all other available sessions.
Args:
username (String): The username
configParams (String): JSON string of the form:
{
"max_number_of_old_results": [5],
"exerciseResultID": ["something"],
"rehabilitationSetID": ["somethingelse"]
}
dbHandler (DatabaseHandler): The databasehandler.
logger (logger): The logging class.
Returns:
{
"status": "Ready",
"results":
[
{
"name": "currentErrors",
"type": "plot",
"data": [
3.1,
-8.2,
0.6
],
"priority": 1,
"legend": "currentErrors",
"subtype": "lines",
"plotID": "1"
},
{
"name": "currentMeanError",
"type": "plot",
"data": 5.6,
"priority": 1,
"legend": "currentMeanError",
"subtype": "lines",
"plotID": "1"
},
{
"name": "oldErrors",
"type": "plot",
"data": [
3.6,
-9.2,
4.4
],
"priority": 1,
"legend": "oldErrors",
"subtype": "lines",
"plotID": "1"
},
{
"name": "oldMeanError",
"type": "plot",
"data": 2.6,
"priority": 1,
"legend": "oldMeanError",
"subtype": "lines",
"plotID": "1"
}
]
}
"""
logger.info("Starting analysis...")
#Gather data
currentExerciseResultID = configParams["exerciseResultID"][0]
rehabilitationSetID = configParams["rehabilitationSetID"][0]
oldDatasets = []
logger.info("username: " + username)
logger.info("currentExerciseResultID: " + currentExerciseResultID)
logger.info("rehabilitationSetID: " + rehabilitationSetID)
newExerciseResult = moduleUtilities.gather_exerciseresult(username, dbHandler, logger, currentExerciseResultID) #Maybe need to load?
logger.debug("newExerciseResult: " + str(newExerciseResult))
currentErrors = self._get_peak_errors_from_exercise_result(newExerciseResult, logger)
currentMeanError = np.mean(currentErrors)
rehabilitationSet = moduleUtilities.gather_rehabilitationset(username, dbHandler, logger, rehabilitationSetID)
logger.debug("RehabilitationSet: " + str(rehabilitationSet))
exerciseResultIDs = rehabilitationSet.get("exerciseResultIDs")
exerciseResultIDs = exerciseResultIDs.split(";")
logger.debug("exerciseResultIDs: " + str(exerciseResultIDs))
numberOfOldResults = 0
try:
maxNumberOfOldResults = int(configParams[moduleUtilities.MAX_NUMBER_OF_OLD_RESULTS["name"]][0])
except Exception as e:
logger.error(str(e))
maxNumberOfOldResults = int(moduleUtilities.MAX_NUMBER_OF_OLD_RESULTS["default"][0])
logger.info("maxNumberOfOldResults: " + str(maxNumberOfOldResults))
oldExerciseResults = []
for exerciseResultID in exerciseResultIDs:
logger.debug("exerciseResultID: " + str(exerciseResultID))
if (exerciseResultID == currentExerciseResultID): # Don't use the same exerciseResult again
continue
elif (numberOfOldResults >= maxNumberOfOldResults):
break
else:
oldExerciseResult = moduleUtilities.gather_exerciseresult(username, dbHandler, logger, exerciseResultID)
if (json.loads(oldExerciseResult.get("values")).get("peaks")) is None:
continue
numberOfOldResults = numberOfOldResults + 1
logger.debug("numberOfOldResults = " + str(numberOfOldResults))
oldExerciseResults.append(oldExerciseResult)
oldErrorMatrix = []
for result in oldExerciseResults:
oldErrorMatrix.append(self._get_peak_errors_from_exercise_result(result, logger))
#transpose the oldErrorMatrix and calculate mean error for each peak
oldErrorsTransposed = zip(*oldErrorMatrix)
oldErrors = map(np.mean, oldErrorsTransposed)
oldMeanError = np.mean(oldErrors)
result = self._construct_result_object(currentErrors, currentMeanError, oldErrors, oldMeanError)
return result
def _construct_result_object(self, currentErrors, currentMeanError, oldErrors, oldMeanError):
currentErrors = [[i, e] for i, e in enumerate(currentErrors)]
currentMeanError = [[0, currentMeanError], [len(currentErrors)-1, currentMeanError]]
oldErrors = [[i, e] for i, e in enumerate(oldErrors)]
oldMeanError = [[0, oldMeanError], [len(oldErrors)-1, oldMeanError]]
plotData = []
plotData.append(moduleUtilities.construct_single_plotdata(data=currentErrors, legend="currentErrors", subtype="lines"))
plotData.append(moduleUtilities.construct_single_plotdata(data=currentMeanError, legend="currentMeanError", subtype="lines"))
plotData.append(moduleUtilities.construct_single_plotdata(data=oldErrors, legend="oldErrors", subtype="lines"))
plotData.append(moduleUtilities.construct_single_plotdata(data=oldMeanError, legend="oldMeanError", subtype="lines"))
plotResult = [moduleUtilities.construct_plotresult(title="Peak errors", plotDatas=plotData, priority=1, x_label="Time", y_label="Weight (kg)")]
return moduleUtilities.construct_results_object(plotResult)
def _get_peaks_from_exercise_result(self, exerciseResult, logger):
"""
Extracts the peaks from an exerciseResult.
Args:
exerciseResult (String): A JSON-formatted string with an exerciseResult.
logger (logger): The logging class.
Returns:
peaks ([float]): An array with all the peaks from the exerciseResult.
"""
values = json.loads(exerciseResult.get("values"))
peakData = values.get("peaks")
logger.debug("peakData: " + str(peakData))
peaks = []
for peak in peakData:
peaks.append(float(peak[1])) #removes timestamp
logger.debug("peaks: " + str(peaks))
return peaks
def _get_peak_errors_from_exercise_result(self, exerciseResult, logger):
"""
Extracts the peakerrors from an exerciseResult.
Args:
exerciseResult (String): A JSON-formatted string with an exerciseResult.
logger (logger): The logging class.
Returns:
error ([float]): An array with all the errors from the peaks in the exerciseResult.
"""
logger.debug("in _get_peak_errors_from_exercise_result...")
settings = json.loads(exerciseResult.get("settings"))
targetValue = float(settings["target"])
peaks = self._get_peaks_from_exercise_result(exerciseResult, logger)
errors = []
for peak in peaks:
errors.append(peak-targetValue)
logger.debug("Errors: " + str(errors))
return errors
def necessary_config_params(self):
return [moduleUtilities.MAX_NUMBER_OF_OLD_RESULTS, moduleUtilities.EXERCISE_RESULT_ID, moduleUtilities.REHABILITATIONSET_ID]
def description(self):
description = "An module for analysis on data from Evalans SensiStep. Calculates the error between the peakvalues in an exerciseresult and the targetvalue. It produces a result for the chosen exerciseresult and another result with mean errors of several other exerciseresult. "
return description
def permission_level(self):
return 6
def __init__(self):
return
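if __name__ == '__main__':
    # Minimal sketch of the peak-error calculation on a fabricated exercise result.
    # Hedged example: it assumes the AnalysisPackage and flask imports above resolve,
    # and the peak/target values below are purely illustrative.
    logging.basicConfig(level=logging.DEBUG)
    demo_logger = logging.getLogger("peakerror_demo")
    demo_exercise_result = {
        "values": json.dumps({"peaks": [[1000, 13.0], [2000, 2.0], [3000, 10.5]]}),
        "settings": json.dumps({"target": 10.0}),
    }
    module = peakerror_analysis()
    errors = module._get_peak_errors_from_exercise_result(demo_exercise_result, demo_logger)
    print(errors)           # [3.0, -8.0, 0.5] relative to the 10.0 target
    print(np.mean(errors))  # -1.5, i.e. what analyse() reports as currentMeanError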
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The flavor access extension."""
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import flavors
from nova import exception
from nova.openstack.common.gettextutils import _
ALIAS = 'os-flavor-access'
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
def make_flavor(elem):
elem.set('{%s}is_public' % FlavorAccess.namespace,
'%s:is_public' % FlavorAccess.alias)
def make_flavor_access(elem):
elem.set('flavor_id')
elem.set('tenant_id')
class FlavorTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavor', selector='flavor')
make_flavor(root)
alias = FlavorAccess.alias
namespace = FlavorAccess.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class FlavorsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavors')
elem = xmlutil.SubTemplateElement(root, 'flavor', selector='flavors')
make_flavor(elem)
alias = FlavorAccess.alias
namespace = FlavorAccess.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class FlavorAccessTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavor_access')
elem = xmlutil.SubTemplateElement(root, 'access',
selector='flavor_access')
make_flavor_access(elem)
return xmlutil.MasterTemplate(root, 1)
def _marshall_flavor_access(flavor_id):
rval = []
try:
access_list = flavors.get_flavor_access_by_flavor_id(flavor_id)
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
for access in access_list:
rval.append({'flavor_id': flavor_id,
'tenant_id': access['project_id']})
return {'flavor_access': rval}
class FlavorAccessController(object):
"""The flavor access API controller for the OpenStack API."""
def __init__(self):
super(FlavorAccessController, self).__init__()
@extensions.expected_errors(404)
@wsgi.serializers(xml=FlavorAccessTemplate)
def index(self, req, flavor_id):
context = req.environ['nova.context']
authorize(context)
try:
flavor = flavors.get_flavor_by_flavor_id(flavor_id, ctxt=context)
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
# public flavor to all projects
if flavor['is_public']:
explanation = _("Access list not available for public flavors.")
raise webob.exc.HTTPNotFound(explanation=explanation)
# private flavor to listed projects only
return _marshall_flavor_access(flavor_id)
class FlavorActionController(wsgi.Controller):
"""The flavor access API controller for the OpenStack API."""
def _get_flavor_refs(self, context):
"""Return a dictionary mapping flavorid to flavor_ref."""
flavor_refs = flavors.get_all_flavors(context)
rval = {}
for name, obj in flavor_refs.iteritems():
rval[obj['flavorid']] = obj
return rval
def _extend_flavor(self, flavor_rval, flavor_ref):
key = "%s:is_public" % (FlavorAccess.alias)
flavor_rval[key] = flavor_ref['is_public']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(id)
self._extend_flavor(resp_obj.obj['flavor'], db_flavor)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=FlavorsTemplate())
flavors = list(resp_obj.obj['flavors'])
for flavor_rval in flavors:
db_flavor = req.get_db_flavor(flavor_rval['id'])
self._extend_flavor(flavor_rval, db_flavor)
@wsgi.extends(action='create')
def create(self, req, body, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id'])
self._extend_flavor(resp_obj.obj['flavor'], db_flavor)
@extensions.expected_errors((400, 404, 409))
@wsgi.serializers(xml=FlavorAccessTemplate)
@wsgi.action("add_tenant_access")
def _add_tenant_access(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'add_tenant_access'):
raise webob.exc.HTTPBadRequest(explanation=_("Invalid request"))
vals = body['add_tenant_access']
try:
tenant = vals['tenant_id']
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("tenant_id is required"))
try:
flavors.add_flavor_access(id, tenant, context)
except exception.FlavorAccessExists as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return _marshall_flavor_access(id)
@extensions.expected_errors((400, 404))
@wsgi.serializers(xml=FlavorAccessTemplate)
@wsgi.action("remove_tenant_access")
def _remove_tenant_access(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'remove_tenant_access'):
raise webob.exc.HTTPBadRequest(explanation=_("Invalid request"))
vals = body['remove_tenant_access']
try:
tenant = vals['tenant_id']
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("tenant_id is required"))
try:
flavors.remove_flavor_access(id, tenant, context)
except (exception.FlavorAccessNotFound,
exception.FlavorNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return _marshall_flavor_access(id)
class FlavorAccess(extensions.V3APIExtensionBase):
"""Flavor access support."""
name = "FlavorAccess"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/%s/api/v3" % ALIAS
version = 1
def get_resources(self):
res = extensions.ResourceExtension(
ALIAS,
controller=FlavorAccessController(),
parent=dict(member_name='flavor', collection_name='flavors'))
return [res]
def get_controller_extensions(self):
extension = extensions.ControllerExtension(
self, 'flavors', FlavorActionController())
return [extension]
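# Example request bodies handled by FlavorActionController above (illustrative;
# the flavor id and tenant id are placeholders, not values from this file):
#
#   POST /v3/flavors/{flavor_id}/action
#   {"add_tenant_access": {"tenant_id": "fake_tenant"}}
#
#   POST /v3/flavors/{flavor_id}/action
#   {"remove_tenant_access": {"tenant_id": "fake_tenant"}}
#
# Both actions return the marshalled access list, e.g.:
#   {"flavor_access": [{"flavor_id": "1", "tenant_id": "fake_tenant"}]}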
|
|
"""
Evaluation of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. That
said, the typical entry point for static analysis is calling
``eval_expr_stmt``. There's separate logic for autocompletion in the API; the
evaluator is all about evaluating an expression.
TODO this paragraph is not what jedi does anymore.
Now you need to understand what follows after ``eval_expr_stmt``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.eval_node`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_node`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored.
"""
import sys
from parso.python import tree
import parso
from jedi import debug
from jedi import parser_utils
from jedi.evaluate.utils import unite
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \
ContextSet, NO_CONTEXTS, iterate_contexts
from jedi.evaluate.context import ClassContext, FunctionContext, \
AnonymousInstance, BoundMethod
from jedi.evaluate.context.iterable import CompForContext
from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \
eval_node, check_tuple_assignments
class Evaluator(object):
def __init__(self, grammar, project):
self.grammar = grammar
self.latest_grammar = parso.load_grammar(version='3.6')
self.memoize_cache = {} # for memoize decorators
# To memorize modules -> equals `sys.modules`.
self.modules = {} # like `sys.modules`.
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.inferred_element_counts = {}
self.mixed_cache = {} # see `evaluate.compiled.mixed._create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.python_version = sys.version_info[:2]
self.project = project
project.add_evaluator(self)
self.reset_recursion_limitations()
# Constants
self.BUILTINS = compiled.get_special_object(self, 'BUILTINS')
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def eval_element(self, context, element):
if isinstance(context, CompForContext):
return eval_node(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
if if_stmt.type in ('if_stmt', 'for_stmt'):
break
if parser_utils.is_scope(if_stmt):
if_stmt = None
break
predefined_if_name_dict = context.predefined_names.get(if_stmt)
if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt':
if_stmt_test = if_stmt.children[1]
name_dicts = [{}]
# If we already did a check, we don't want to do it again -> If
# context.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
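# Illustrative case (an assumption about intent, not taken from the source): for
#     if x:
#         x.upper()
# `x` occurs both in the if-test and in the evaluated element, so each definition
# of `x` is predefined in turn and the element is evaluated once per combination,
# with the results unioned below.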
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = helpers.get_names_of_node(if_stmt_test)
element_names = helpers.get_names_of_node(element)
str_element_names = [e.value for e in element_names]
if any(i.value in str_element_names for i in if_names):
for if_name in if_names:
definitions = self.goto_definitions(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch evaluation %s.', if_stmt)
# There's only a certain number of branches
# Jedi can evaluate, otherwise it would take
# too long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = ContextSet(definition)
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[if_name.value] = definitions
if len(name_dicts) > 1:
result = ContextSet()
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= eval_node(context, element)
return result
else:
return self._eval_element_if_evaluated(context, element)
else:
if predefined_if_name_dict:
return eval_node(context, element)
else:
return self._eval_element_if_evaluated(context, element)
def _eval_element_if_evaluated(self, context, element):
"""
TODO This function is temporary: Merge with eval_element.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return eval_node(context, element)
return self._eval_element_cached(context, element)
@evaluator_function_cache(default=NO_CONTEXTS)
def _eval_element_cached(self, context, element):
return eval_node(context, element)
def goto_definitions(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
type_ = def_.type
if type_ == 'classdef':
return [ClassContext(self, context, name.parent)]
elif type_ == 'funcdef':
return [FunctionContext(self, context, name.parent)]
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return eval_expr_stmt(context, def_, name)
if type_ == 'for_stmt':
container_types = context.eval_node(def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterate_contexts(container_types, cn)
c_node = ContextualizedName(context, name)
return check_tuple_assignments(self, c_node, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
return helpers.evaluate_call_of_leaf(context, name)
def goto(self, context, name):
definition = name.get_definition(import_name_always=True)
if definition is not None:
type_ = definition.type
if type_ == 'expr_stmt':
# Only take the parent, because if it's more complicated than just
# a name it's something you can "goto" again.
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return [TreeNameDefinition(context, name)]
elif type_ == 'param':
return [ParamName(context, name)]
elif type_ in ('funcdef', 'classdef'):
return [TreeNameDefinition(context, name)]
elif type_ in ('import_from', 'import_name'):
module_names = imports.infer_import(context, name, is_goto=True)
return module_names
par = name.parent
node_type = par.type
if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
context_set = context.eval_node(trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_evaluate = trailer.parent.children[:i]
if to_evaluate[0] == 'await':
to_evaluate.pop(0)
context_set = context.eval_node(to_evaluate[0])
for trailer in to_evaluate[1:]:
context_set = eval_trailer(context, context_set, trailer)
param_names = []
for context in context_set:
try:
get_param_names = context.get_param_names
except AttributeError:
pass
else:
for param_name in get_param_names():
if param_name.string_name == name.value:
param_names.append(param_name)
return param_names
elif node_type == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = context.eval_node(new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
if node_type == 'trailer' and par.children[0] == '.':
values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
else:
stmt = tree.search_ancestor(
name, 'expr_stmt', 'lambdef'
) or name
if stmt.type == 'lambdef':
stmt = name
return context.py__getattribute__(
name,
position=stmt.start_pos,
search_global=True, is_goto=True
)
def create_context(self, base_context, node, node_is_context=False, node_is_object=False):
def parent_scope(node):
while True:
node = node.parent
if parser_utils.is_scope(node):
return node
elif node.type in ('argument', 'testlist_comp'):
if node.children[1].type == 'comp_for':
return node.children[1]
elif node.type == 'dictorsetmaker':
for n in node.children[1:4]:
# In dictionaries it can be pretty much anything.
if n.type == 'comp_for':
return n
def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False):
if scope_node == base_node:
return base_context
is_funcdef = scope_node.type in ('funcdef', 'lambdef')
parent_scope = parser_utils.get_parent_scope(scope_node)
parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)
if is_funcdef:
if isinstance(parent_context, AnonymousInstance):
func = BoundMethod(
self, parent_context, parent_context.class_context,
parent_context.parent_context, scope_node
)
else:
func = FunctionContext(
self,
parent_context,
scope_node
)
if is_nested and not node_is_object:
return func.get_function_execution()
return func
elif scope_node.type == 'classdef':
class_context = ClassContext(self, parent_context, scope_node)
if child_is_funcdef:
# anonymous instance
return AnonymousInstance(self, parent_context, class_context)
else:
return class_context
elif scope_node.type == 'comp_for':
if node.start_pos >= scope_node.children[-1].start_pos:
return parent_context
return CompForContext.from_comp_for(parent_context, scope_node)
raise Exception("There's a scope that was not managed.")
base_node = base_context.tree_node
if node_is_context and parser_utils.is_scope(node):
scope_node = node
else:
if node.parent.type in ('funcdef', 'classdef') and node.parent.name == node:
# When we're on class/function names/leafs that define the
# object itself and not its contents.
node = node.parent
scope_node = parent_scope(node)
return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Flango: A simple WSGI based webframework for learning.
Flango is a WSGI based webframework in pure Python, without any third-party dependency.
Flango includes a simple router, which provides request routing, a template engine
for template rendering, a simple wrapper for WSGI request and response, and
an ORM framework for sqlite3.
"""
import json
import os
import time
import sys
import traceback
import threading
import mimetypes
from urllib import quote
from .server import ServerAdapter, WSGIRefServer
from .template import Loader
from .wrappers import Request, Response
from .router import Router, RouterException
class _Stack(threading.local):
def __init__(self):
super(_Stack, self).__init__()
self._stack = []
def push(self, app):
self._stack.append(app)
def pop(self):
try:
self._stack.pop()
except IndexError:
return None
def top(self):
try:
return self._stack[-1]
except IndexError:
return None
def __len__(self):
return len(self._stack)
def empty(self):
return len(self._stack) == 0
def __repr__(self):
return 'app_stack with {0} applications'.format(len(self))
class FlangoException(Exception):
def __init__(self, code, response, server_handler, DEBUG=False):
self._DEBUG = DEBUG
self._response = response
self._response.set_status(code)
self._server_handler = server_handler
def __call__(self):
if self._DEBUG:
body = '<br>'.join([self._response.status, traceback.format_exc().replace('\n', '<br>')])
else:
body = self._response.status
self._response.set_body(body)
self._server_handler(self._response.status, self._response.headerlist)
return [self._response.body]
class Flango(object):
"""Main object of this funny web frameWork."""
def __init__(self, pkg_name, template='template', static='static'):
# router
self._router = Router()
# request and response
self._request = Request()
self._response = Response(None)
# template
self.package_name = pkg_name
# where is the app root located?
self.root_path = self._get_package_path(self.package_name).replace('\\', '\\\\') # '\u' escape
self.loader = Loader(os.sep.join([self.root_path, template]))
# static file
self.static_folder = static
self.abspath = None
self.modified = None
self.static_url_cache = {}
# session
self._session = self._request.cookies
# server handler
self._server_handler = None
# debug
self.DEBUG = False
# config
self.config = {}
self.config.setdefault('DATABASE_NAME', 'flango.db')
# push to the _app_stack
global app_stack
app_stack.push(self)
def _get_package_path(self, name):
"""Returns the path to a package or cwd if that cannot be found."""
try:
return os.path.abspath(os.path.dirname(sys.modules[name].__file__))
except (KeyError, AttributeError):
return os.getcwd()
def route(self, path, methods=['GET']):
if path is None:
raise RouterException()
methods = [m.upper() for m in methods]
def wrapper(fn):
self._router.register(path, fn, methods)
return fn
return wrapper
@property
def session(self):
return self._session
def run(self, server=WSGIRefServer, host='localhost', port=8000, DEBUG=False):
self.DEBUG = DEBUG
if isinstance(server, type) and issubclass(server, ServerAdapter):
server = server(host=host, port=port)
else:
raise RuntimeError('Server must be a subclass of ServerAdapter.')
print('running on {0}:{1}'.format(host, port))
try:
server.run(self)
except KeyboardInterrupt:
pass
def jsonify(self, *args, **kwargs):
response = Response(body=json.dumps(dict(*args, **kwargs)), code=200)
response.set_content_type('application/json')
return response
def render(self, filename, **context):
app_namespace = sys.modules[self.package_name].__dict__
context.update(globals())
context.update(app_namespace)
return self.loader.load(filename).render(**context)
def not_found(self):
return Response(body='<h1>404 Not Found</h1>', code=404)
def not_modified(self):
response = Response('', code=304)
# Don't need Content-Type here.
# del response.headers['Content-Type']
return response
def redirect(self, location, code=302):
response = Response(body='<p>Redirecting...</p>', code=code)
response.headers['Location'] = location
return response
def url_for(self, fn, filename=None, **kwargs):
# URLs for static files are constructed according to
# current wsgi environ(HTTP_HOST, SERVER_NAME, etc.)
if fn == self.static_folder and filename:
if filename in self.static_url_cache.keys():
return self.static_url_cache[filename]
else:
url = self.construct_url(filename)
# Cache the URL
self.static_url_cache[filename] = url
return url
# Router function URLs are given by the router.
if kwargs:
return self._router.url_for(fn, **kwargs)
return self._router.url_for(fn)
def construct_url(self, filename):
environ = self._request.headers
url = environ['wsgi.url_scheme'] + '://'
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME', ''))
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
url += '/' + '/'.join([self.static_folder, filename])
return url
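# Illustrative result (an assumption, not from this file): with an empty SCRIPT_NAME,
# no QUERY_STRING and an HTTP_HOST of 'localhost:8000', construct_url('logo.png')
# returns 'http://localhost:8000/static/logo.png' for the default static folder.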
@property
def request(self):
return self._request
@property
def response(self):
return self._response
def get_content_type(self):
fallback_content_type = 'text/plain'
mime_type = mimetypes.guess_type(self.abspath)[0]
if mime_type:
return mime_type
else:
return fallback_content_type
def get_modified_time(self):
stats = os.stat(self.abspath)
last_modified_time = time.gmtime(stats.st_mtime)
return last_modified_time
def should_return_304(self):
if_modified_since_str = self._request.if_modified_since
if if_modified_since_str:
if_modified_since_time = time.strptime(if_modified_since_str, '%a, %d %b %Y %H:%M:%S %Z')
if if_modified_since_time >= self.modified:
return True
return False
def is_static_file_request(self):
return self._request.path.lstrip('/').startswith(self.static_folder)
def handle_static(self, path):
response = Response(None)
# This is the absolute path of a static file on the filesystem
self.abspath = self.root_path + path
if not os.path.exists(self.abspath) or not os.path.isfile(self.abspath):
return self.not_found()
content_type = self.get_content_type()
response.set_content_type(content_type)
self.modified = self.get_modified_time()
if self.should_return_304():
return self.not_modified()
if 'Last-Modified' not in response.headers.keys():
last_modified_str = time.strftime(
'%a, %d %b %Y %H:%M:%S UTC', self.modified)
response.headers['Last-Modified'] = last_modified_str
with open(self.abspath, 'rb') as f:  # binary mode so images and other binary assets are served intact
response.set_body(body=(f.read()))
return response
def handle_router(self):
try:
handler, args = self._router.get(self._request.path, self._request.method)
except RouterException:
# No handler is found, assume it's a 404.
return self.not_found()
return handler(**args) if args else handler()
def __call__(self, environ, start_response):
self._response = Response(None)
self._request = Request(None)
self._server_handler = start_response
self._request.bind(environ)
if self.is_static_file_request():
r = self.handle_static(self._request.path)
else:
try:
r = self.handle_router()
except Exception:
return FlangoException(500, self._response, self._server_handler, self.DEBUG)()
# Static files, 302, 304 and 404
if isinstance(r, Response):
self._response = r
self._server_handler(r.status, r.headerlist)
return [r.body]
# Normal html
self._response.set_body(body=r)
self._response.set_status(200)
start_response(self._response.status, self._response.headerlist)
return [self._response.body]
"""
default methods and properties
"""
# global app stack.
app_stack = _Stack()
# default app
default_app = app_stack.top()
if not default_app: # hack for shell
default_app = Flango('/')
# shell
request = app_stack.top().request
response = app_stack.top().response
session = app_stack.top().session
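if __name__ == '__main__':
    # Minimal usage sketch of the framework defined above (a hedged example, not part
    # of the original module). 'demo' is an arbitrary package name; _get_package_path
    # falls back to the current working directory when that package cannot be imported.
    app = Flango('demo')

    @app.route('/hello', methods=['GET'])
    def hello():
        return app.jsonify(message='Hello, world!')

    app.run(host='localhost', port=8000, DEBUG=True)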
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'depot_tools/bot_update',
'depot_tools/gclient',
'ios',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'webrtc'
]
def RunSteps(api):
api.gclient.set_config('webrtc_ios')
api.ios.host_info()
api.bot_update.ensure_checkout()
api.path['checkout'] = api.path['slave_build'].join('src')
build_config_dir = api.path['checkout'].join(
'webrtc',
'build',
'ios',
api.properties['mastername'],
)
include_dir = api.path['checkout'].join(
'webrtc',
'build',
'ios',
'tests',
)
buildername = api.properties['buildername'].replace(' ', '_')
api.ios.read_build_config(build_config_dir=build_config_dir,
include_dir=include_dir,
buildername=buildername)
mb_config_path = api.path['checkout'].join(
'webrtc',
'build',
'mb_config.pyl',
)
api.ios.build(mb_config_path=mb_config_path)
api.ios.test()
def GenTests(api):
yield (
api.test('basic')
+ api.platform('mac', 64)
+ api.properties(
buildername='ios debug',
buildnumber='0',
mastername='chromium.fake',
slavename='fake-vm',
)
+ api.ios.make_test_build_config({
'xcode version': 'fake xcode version',
'GYP_DEFINES': {
'fake gyp define 1': 'fake value 1',
'fake gyp define 2': 'fake value 2',
'use_goma': '1',
},
'compiler': 'ninja',
'configuration': 'Debug',
'sdk': 'iphonesimulator8.0',
'tests': [
{
'app': 'fake tests 1',
'device type': 'fake device',
'os': '8.0',
},
{
'app': 'fake tests 2',
'device type': 'fake device',
'os': '7.1',
},
],
})
)
yield (
api.test('gn_build')
+ api.platform('mac', 64)
+ api.properties(
buildername='ios',
buildnumber='0',
mastername='chromium.fake',
slavename='fake-vm',
)
+ api.ios.make_test_build_config({
'xcode version': 'fake xcode version',
'GYP_DEFINES': {
},
"gn_args": [
"is_debug=true"
],
"mb_type": "gn",
'compiler': 'ninja',
'configuration': 'Debug',
'sdk': 'iphoneos8.0',
'tests': [
],
})
)
yield (
api.test('no_tests')
+ api.platform('mac', 64)
+ api.properties(
buildername='ios',
buildnumber='0',
mastername='chromium.fake',
slavename='fake-vm',
)
+ api.ios.make_test_build_config({
'xcode version': 'fake xcode version',
'GYP_DEFINES': {
'fake gyp define 1': 'fake value 1',
'fake gyp define 2': 'fake value 2',
'use_goma': '1',
},
'compiler': 'ninja',
'configuration': 'Release',
'sdk': 'iphoneos8.0',
'tests': [
],
})
)
yield (
api.test('trybot')
+ api.platform('mac', 64)
+ api.properties(
buildername='ios',
buildnumber='0',
mastername='chromium.fake',
slavename='fake-vm',
)
+ api.ios.make_test_build_config({
'xcode version': 'fake xcode version',
'GYP_DEFINES': {
'fake gyp define 1': 'fake value 1',
'fake gyp define 2': 'fake value 2',
'use_goma': '1',
},
'use_analyze': 'false',
'compiler': 'ninja',
'configuration': 'Release',
'sdk': 'iphoneos8.0',
'tests': [
],
})
)
yield (
api.test('test_failure')
+ api.platform('mac', 64)
+ api.properties(patch_url='patch url')
+ api.properties(
buildername='ios',
buildnumber='0',
mastername='chromium.fake',
slavename='fake-vm',
)
+ api.ios.make_test_build_config({
'xcode version': 'fake xcode version',
'GYP_DEFINES': {
'fake gyp define 1': 'fake value 1',
'fake gyp define 2': 'fake value 2',
},
'compiler': 'xcodebuild',
'configuration': 'Debug',
'sdk': 'iphonesimulator8.0',
'tests': [
{
'app': 'fake tests 1',
'device type': 'fake device',
'os': '8.0',
},
{
'app': 'fake tests 2',
'device type': 'fake device',
'os': '7.1',
},
],
})
+ api.step_data(
'fake tests 1 (fake device iOS 8.0)',
retcode=1
)
)
yield (
api.test('infrastructure_failure')
+ api.platform('mac', 64)
+ api.properties(
buildername='ios',
buildnumber='0',
mastername='chromium.fake',
slavename='fake-vm',
)
+ api.ios.make_test_build_config({
'xcode version': 'fake xcode version',
'GYP_DEFINES': {
'fake gyp define 1': 'fake value 1',
'fake gyp define 2': 'fake value 2',
},
'compiler': 'ninja',
'configuration': 'Debug',
'sdk': 'iphonesimulator8.0',
'tests': [
{
'app': 'fake tests 1',
'device type': 'fake device',
'os': '8.0',
},
{
'app': 'fake tests 2',
'device type': 'fake device',
'os': '7.1',
},
],
})
+ api.step_data(
'fake tests 1 (fake device iOS 8.0)',
retcode=2,
)
)
|
|
'''
Creates tiles of when tree cover loss coincides with burning or preceded burning by one year.
There are four steps to this: 1) acquire raw hdfs from MODIS burned area sftp; 2) make tifs of burned area for
each year in each MODIS h-v tile; 3) make annual Hansen-style (extent, res, etc.) tiles of burned area;
4) make tiles of where TCL and burning coincided (same year or with 1 year lag).
To update this, steps 1-3 can be run on only the latest year of MODIS burned area product. Only step 4 needs to be run
on the entire time series. That is, steps 1-3 operate on burned area products separately for each year, so adding
another year of data won't change steps 1-3 for preceding years.
NOTE: The step in which hdf files are opened and converted to tifs (step 2) requires
the osgeo/gdal:ubuntu-full-X.X.X Docker image. The 'small' Docker image doesn't have an hdf driver in gdal, so it can't read
the hdf files on the ftp site. The rest of the burned area analysis can be done with a 'small' version of the Docker image
(though that would require terminating the Docker container and restarting it, which would only make sense if the
analysis was being continued later).
Step 4 takes many hours to run, mostly because it only uses five processors since each one requires so much memory.
The other steps might take an hour or two to run.
This is still basically as Sam Gibbes wrote it in early 2018, with file name changes and other input/output changes
by David Gibbs. The core processing code is still all Sam's.
'''
import multiprocessing
from functools import partial
import pandas as pd
import datetime
import glob
import shutil
import argparse
from subprocess import Popen, PIPE, STDOUT, check_call
import os
import sys
import utilities
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
sys.path.append(os.path.join(cn.docker_app,'burn_date'))
import stack_ba_hv
import clip_year_tiles
import hansen_burnyear_final
def mp_burn_year(tile_id_list, run_date = None, no_upload = None):
os.chdir(cn.docker_base_dir)
# If a full model run is specified, the correct set of tiles for the particular script is listed
if tile_id_list == 'all':
# List of tiles to run in the model
tile_id_list = uu.tile_list_s3(cn.pixel_area_dir)
uu.print_log(tile_id_list)
uu.print_log("There are {} tiles to process".format(str(len(tile_id_list))) + "\n")
# List of output directories and output file name patterns
output_dir_list = [cn.burn_year_dir]
output_pattern_list = [cn.pattern_burn_year]
# A date can optionally be provided by the full model script or a run of this script.
# This replaces the date in constants_and_names.
# Only done if output upload is enabled.
if run_date is not None and no_upload is not None:
output_dir_list = uu.replace_output_dir_date(output_dir_list, run_date)
global_grid_hv = ["h00v08", "h00v09", "h00v10", "h01v07", "h01v08", "h01v09", "h01v10", "h01v11", "h02v06",
"h02v08", "h02v09", "h02v10", "h02v11", "h03v06", "h03v07", "h03v09", "h03v10", "h03v11",
"h04v09", "h04v10", "h04v11", "h05v10", "h05v11", "h05v13", "h06v03", "h06v11", "h07v03",
"h07v05", "h07v06", "h07v07", "h08v03", "h08v04", "h08v05", "h08v06", "h08v07", "h08v08",
"h08v09", "h08v11", "h09v02", "h09v03", "h09v04", "h09v05", "h09v06", "h09v07", "h09v08",
"h09v09", "h10v02", "h10v03", "h10v04", "h10v05", "h10v06", "h10v07", "h10v08", "h10v09",
"h10v10", "h10v11", "h11v02", "h11v03", "h11v04", "h11v05", "h11v06", "h11v07", "h11v08",
"h11v09", "h11v10", "h11v11", "h11v12", "h12v02", "h12v03", "h12v04", "h12v05", "h12v07",
"h12v08", "h12v09", "h12v10", "h12v11", "h12v12", "h12v13", "h13v02", "h13v03", "h13v04",
"h13v08", "h13v09", "h13v10", "h13v11", "h13v12", "h13v13", "h13v14", "h14v02", "h14v03",
"h14v04", "h14v09", "h14v10", "h14v11", "h14v14", "h15v02", "h15v03", "h15v05", "h15v07",
"h15v11", "h16v02", "h16v05", "h16v06", "h16v07", "h16v08", "h16v09", "h17v02", "h17v03",
"h17v04", "h17v05", "h17v06", "h17v07", "h17v08", "h17v10", "h17v12", "h17v13", "h18v02",
"h18v03", "h18v04", "h18v05", "h18v06", "h18v07", "h18v08", "h18v09", "h19v02", "h19v03",
"h19v04", "h19v05", "h19v06", "h19v07", "h19v08", "h19v09", "h19v10", "h19v11", "h19v12",
"h20v02", "h20v03", "h20v04", "h20v05", "h20v06", "h20v07", "h20v08", "h20v09", "h20v10",
"h20v11", "h20v12", "h20v13", "h21v02", "h21v03", "h21v04", "h21v05", "h21v06", "h21v07",
"h21v08", "h21v09", "h21v10", "h21v11", "h21v13", "h22v02", "h22v03", "h22v04", "h22v05",
"h22v06", "h22v07", "h22v08", "h22v09", "h22v10", "h22v11", "h22v13", "h23v02", "h23v03",
"h23v04", "h23v05", "h23v06", "h23v07", "h23v08", "h23v09", "h23v10", "h23v11", "h24v02",
"h24v03", "h24v04", "h24v05", "h24v06", "h24v07", "h24v12", "h25v02", "h25v03", "h25v04",
"h25v05", "h25v06", "h25v07", "h25v08", "h25v09", "h26v02", "h26v03", "h26v04", "h26v05",
"h26v06", "h26v07", "h26v08", "h27v03", "h27v04", "h27v05", "h27v06", "h27v07", "h27v08",
"h27v09", "h27v10", "h27v11", "h27v12", "h28v03", "h28v04", "h28v05", "h28v06", "h28v07",
"h28v08", "h28v09", "h28v10", "h28v11", "h28v12", "h28v13", "h29v03", "h29v05", "h29v06",
"h29v07", "h29v08", "h29v09", "h29v10", "h29v11", "h29v12", "h29v13", "h30v06", "h30v07",
"h30v08", "h30v09", "h30v10", "h30v11", "h30v12", "h30v13", "h31v06", "h31v07", "h31v08",
"h31v09", "h31v10", "h31v11", "h31v12", "h31v13", "h32v07", "h32v08", "h32v09", "h32v10",
"h32v11", "h32v12", "h33v07", "h33v08", "h33v09", "h33v10", "h33v11", "h34v07", "h34v08",
"h34v09", "h34v10", "h35v08", "h35v09", "h35v10"]
# Step 1: download hdf files for relevant year(s) from sftp site.
# This only needs to be done for the most recent year of data.
'''
Downloading the hdf files from the sftp burned area site is done outside the script in the sftp shell on the command line.
This will download all the 2020 hdfs to the spot machine. It will take a few minutes before the first
hdf is downloaded but then it should go quickly.
Change 2020 to other year for future years of downloads.
https://modis-fire.umd.edu/files/MODIS_C6_BA_User_Guide_1.3.pdf, page 24, section 4.1.3
sftp fire@fuoco.geog.umd.edu
[For password] burnt
cd data/MODIS/C6/MCD64A1/HDF
ls [to check that it's the folder with all the tile folders]
get h??v??/MCD64A1.A2020*
bye //exits the sftp shell
'''
# Uploads the latest year of raw burn area hdfs to s3.
# All hdfs go in this folder
cmd = ['aws', 's3', 'cp', '{0}/burn_date/'.format(cn.docker_app), cn.burn_year_hdf_raw_dir, '--recursive', '--exclude', '*', '--include', '*hdf']
uu.log_subprocess_output_full(cmd)
# Step 2:
# Makes burned area rasters for each year for each MODIS horizontal-vertical tile.
# This only needs to be done for the most recent year of data (set in stack_ba_hv).
uu.print_log("Stacking hdf into MODIS burned area tifs by year and MODIS hv tile...")
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=count - 10)
pool.map(stack_ba_hv.stack_ba_hv, global_grid_hv)
pool.close()
pool.join()
# # For single processor use
# for hv_tile in global_grid_hv:
# stack_ba_hv.stack_ba_hv(hv_tile)
# Step 3:
# Creates a 10x10 degree wgs 84 tile of .00025 res burned year.
# Downloads all MODIS hv tiles from s3,
# makes a mosaic for each year, and warps to Hansen extent.
# Range is inclusive at lower end and exclusive at upper end (e.g., 2001, 2021 goes from 2001 to 2020).
# This only needs to be done for the most recent year of data.
# NOTE: The first time I ran this for the 2020 TCL update, I got an error about uploading the log to s3
# after most of the tiles were processed. I didn't know why it happened, so I reran the step and it went fine.
for year in range(2020, 2021):
uu.print_log("Processing", year)
# Downloads all hv tifs for this year
include = '{0}_*.tif'.format(year)
year_tifs_folder = "{}_year_tifs".format(year)
utilities.makedir(year_tifs_folder)
uu.print_log("Downloading MODIS burn date files from s3...")
cmd = ['aws', 's3', 'cp', cn.burn_year_stacked_hv_tif_dir, year_tifs_folder]
cmd += ['--recursive', '--exclude', "*", '--include', include]
uu.log_subprocess_output_full(cmd)
uu.print_log("Creating vrt of MODIS files...")
vrt_name = "global_vrt_{}.vrt".format(year)
# Builds list of vrt files
with open('vrt_files.txt', 'w') as vrt_files:
vrt_tifs = glob.glob(year_tifs_folder + "/*.tif")
for tif in vrt_tifs:
vrt_files.write(tif + "\n")
# Creates vrt with wgs84 MODIS tiles.
cmd = ['gdalbuildvrt', '-input_file_list', 'vrt_files.txt', vrt_name]
uu.log_subprocess_output_full(cmd)
uu.print_log("Reprojecting vrt...")
# Builds new vrt and virtually project it
# This reprojection could be done as part of the clip_year_tiles function but Sam had it out here like this and
# so I'm leaving it like that.
vrt_wgs84 = 'global_vrt_{}_wgs84.vrt'.format(year)
cmd = ['gdalwarp', '-of', 'VRT', '-t_srs', "EPSG:4326", '-tap', '-tr', '.00025', '.00025', '-overwrite',
vrt_name, vrt_wgs84]
uu.log_subprocess_output_full(cmd)
# Creates a list of lists, with year and tile id to send to multi processor
tile_year_list = []
for tile_id in tile_id_list:
tile_year_list.append([tile_id, year])
# Given a list of tiles and years ['00N_000E', 2017] and a VRT of burn data,
# the global vrt has pixels representing burned or not. This process clips the global VRT
# and changes the pixel value to represent the year the pixel was burned. Each tile has value of
# year burned and NoData.
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=count-5)
pool.map(partial(clip_year_tiles.clip_year_tiles, no_upload=no_upload), tile_year_list)
pool.close()
pool.join()
# # For single processor use
# for tile_year in tile_year_list:
# clip_year_tiles.clip_year_tiles(tile_year, no_upload)
uu.print_log("Processing for {} done. Moving to next year.".format(year))
# Step 4:
# Creates a single Hansen tile covering all years that represents where burning coincided with tree cover loss
# or preceded TCL by one year.
# This needs to be done on all years each time burned area is updated.
# Downloads the loss tiles
uu.s3_folder_download(cn.loss_dir, '.', 'std', cn.pattern_loss)
uu.print_log("Extracting burn year data that coincides with tree cover loss...")
# Downloads the 10x10 deg burn year tiles (1 for each year in which there was burned area), stack and evaluate
# to return burn year values on hansen loss pixels within 1 year of loss date
if cn.count == 96:
processes = 5
# 6 processors = >750 GB peak (1 processor can use up to 130 GB of memory)
else:
processes = 1
pool = multiprocessing.Pool(processes)
pool.map(partial(hansen_burnyear_final.hansen_burnyear, no_upload=no_upload), tile_id_list)
pool.close()
pool.join()
# # For single processor use
# for tile_id in tile_id_list:
# hansen_burnyear_final.hansen_burnyear(tile_id, no_upload)
# If no_upload flag is not activated (by choice or by lack of AWS credentials), output is uploaded
if not no_upload:
uu.upload_final_set(output_dir_list[0], output_pattern_list[0])
if __name__ == '__main__':
# The arguments for what kind of model run is being run (standard conditions or a sensitivity analysis) and
# the tiles to include
parser = argparse.ArgumentParser(
description='Creates tiles of the year in which pixels were burned')
parser.add_argument('--tile_id_list', '-l', required=True,
help='List of tile ids to use in the model. Should be of form 00N_110E or 00N_110E,00N_120E or all.')
parser.add_argument('--run-date', '-d', required=False,
help='Date of run. Must be format YYYYMMDD.')
parser.add_argument('--no-upload', '-nu', action='store_true',
help='Disables uploading of outputs to s3')
args = parser.parse_args()
tile_id_list = args.tile_id_list
run_date = args.run_date
no_upload = args.no_upload
# Create the output log
uu.initiate_log(tile_id_list=tile_id_list, sensit_type='std', run_date=run_date, no_upload=no_upload)
# Checks whether the tile_id_list argument is valid
tile_id_list = uu.tile_id_list_check(tile_id_list)
mp_burn_year(tile_id_list=tile_id_list, run_date=run_date, no_upload=no_upload)
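# Example invocations (illustrative; the script's filename is assumed here to be mp_burn_year.py):
#   python mp_burn_year.py -l 00N_000E,00N_110E -d 20211231
#   python mp_burn_year.py -l all --no-upload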
|
|
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import mock
from oslo_config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import image_service
from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import fake
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import deploy as irmc_deploy
from ironic.drivers.modules.irmc import power as irmc_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as object_utils
INSTANCE_INFO = db_utils.get_test_agent_instance_info()
DRIVER_INFO = db_utils.get_test_agent_driver_info()
DRIVER_INTERNAL_INFO = db_utils.get_test_agent_driver_internal_info()
CONF = cfg.CONF
class TestAgentMethods(db_base.DbTestCase):
def setUp(self):
super(TestAgentMethods, self).setUp()
self.node = object_utils.create_test_node(self.context,
driver='fake_agent')
def test_build_agent_options_conf(self):
self.config(api_url='api-url', group='conductor')
options = agent.build_agent_options(self.node)
self.assertEqual('api-url', options['ipa-api-url'])
self.assertEqual('fake_agent', options['ipa-driver-name'])
self.assertEqual(0, options['coreos.configdrive'])
@mock.patch.object(keystone, 'get_service_url', autospec=True)
def test_build_agent_options_keystone(self, get_url_mock):
self.config(api_url=None, group='conductor')
get_url_mock.return_value = 'api-url'
options = agent.build_agent_options(self.node)
self.assertEqual('api-url', options['ipa-api-url'])
self.assertEqual('fake_agent', options['ipa-driver-name'])
self.assertEqual(0, options['coreos.configdrive'])
def test_build_agent_options_root_device_hints(self):
self.config(api_url='api-url', group='conductor')
self.node.properties['root_device'] = {'model': 'fake_model'}
options = agent.build_agent_options(self.node)
self.assertEqual('api-url', options['ipa-api-url'])
self.assertEqual('fake_agent', options['ipa-driver-name'])
self.assertEqual('model=fake_model', options['root_device'])
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_build_instance_info_for_deploy_glance_image(self, glance_mock):
i_info = self.node.instance_info
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
self.node.instance_info = i_info
self.node.save()
image_info = {'checksum': 'aa', 'disk_format': 'qcow2',
'container_format': 'bare'}
glance_mock.return_value.show = mock.MagicMock(spec_set=[],
return_value=image_info)
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
agent.build_instance_info_for_deploy(task)
glance_mock.assert_called_once_with(version=2,
context=task.context)
glance_mock.return_value.show.assert_called_once_with(
self.node.instance_info['image_source'])
glance_mock.return_value.swift_temp_url.assert_called_once_with(
image_info)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonglance_image(
self, validate_href_mock):
i_info = self.node.instance_info
i_info['image_source'] = 'http://image-ref'
i_info['image_checksum'] = 'aa'
self.node.instance_info = i_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
self.assertEqual(self.node.instance_info['image_source'],
info['image_url'])
validate_href_mock.assert_called_once_with(
mock.ANY, 'http://image-ref')
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonsupported_image(
self, validate_href_mock):
validate_href_mock.side_effect = iter(
[exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')])
i_info = self.node.instance_info
i_info['image_source'] = 'file://img.qcow2'
i_info['image_checksum'] = 'aa'
self.node.instance_info = i_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
agent.build_instance_info_for_deploy, task)
class TestAgentDeploy(db_base.DbTestCase):
def setUp(self):
super(TestAgentDeploy, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
self.driver = agent.AgentDeploy()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
self.ports = [object_utils.create_test_port(self.context,
node_id=self.node.id)]
def test_get_properties(self):
expected = agent.COMMON_PROPERTIES
self.assertEqual(expected, self.driver.get_properties())
def test_validate(self):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
def test_validate_driver_info_missing_params(self):
self.node.driver_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
e = self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
self.assertIn('driver_info.deploy_ramdisk', str(e))
self.assertIn('driver_info.deploy_kernel', str(e))
def test_validate_driver_info_manage_tftp_false(self):
self.config(manage_tftp=False, group='agent')
self.node.driver_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
def test_validate_instance_info_missing_params(self):
self.node.instance_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
e = self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
self.assertIn('instance_info.image_source', str(e))
def test_validate_nonglance_image_no_checksum(self):
i_info = self.node.instance_info
i_info['image_source'] = 'http://image-ref'
del i_info['image_checksum']
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
def test_validate_agent_fail_partition_image(self):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InvalidParameterValue,
self.driver.validate, task)
def test_validate_invalid_root_device_hints(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties['root_device'] = {'size': 'not-int'}
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(agent, '_cache_tftp_images', autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(agent, '_build_pxe_config_options', autospec=True)
@mock.patch.object(agent, '_get_tftp_image_info', autospec=True)
def test__prepare_pxe_boot(self, pxe_info_mock, options_mock,
create_mock, cache_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
agent._prepare_pxe_boot(task)
pxe_info_mock.assert_called_once_with(task.node)
options_mock.assert_called_once_with(task.node, mock.ANY)
create_mock.assert_called_once_with(
task, mock.ANY, CONF.agent.agent_pxe_config_template)
cache_mock.assert_called_once_with(task.context, task.node,
mock.ANY)
@mock.patch.object(agent, '_cache_tftp_images', autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(agent, '_build_pxe_config_options', autospec=True)
@mock.patch.object(agent, '_get_tftp_image_info', autospec=True)
def test__prepare_pxe_boot_manage_tftp_false(
self, pxe_info_mock, options_mock, create_mock, cache_mock):
self.config(manage_tftp=False, group='agent')
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
agent._prepare_pxe_boot(task)
self.assertFalse(pxe_info_mock.called)
self.assertFalse(options_mock.called)
self.assertFalse(create_mock.called)
self.assertFalse(cache_mock.called)
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp', autospec=True)
@mock.patch('ironic.conductor.utils.node_set_boot_device', autospec=True)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_deploy(self, power_mock, bootdev_mock, dhcp_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYWAIT)
dhcp_mock.assert_called_once_with(mock.ANY, task, dhcp_opts, None)
bootdev_mock.assert_called_once_with(task, 'pxe', persistent=True)
power_mock.assert_called_once_with(task,
states.REBOOT)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_tear_down(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(agent, 'AgentTFTPImageCache', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
@mock.patch.object(agent, '_get_tftp_image_info', autospec=True)
def test__clean_up_pxe(self, info_mock, unlink_mock, cache_mock,
clean_mock):
info_mock.return_value = {'label': ['fake1', 'fake2']}
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
agent._clean_up_pxe(task)
info_mock.assert_called_once_with(task.node)
unlink_mock.assert_called_once_with('fake2')
clean_mock.assert_called_once_with(task)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(agent.AgentTFTPImageCache, 'clean_up', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
@mock.patch.object(agent, '_get_tftp_image_info', autospec=True)
def test__clean_up_pxe_manage_tftp_false(
self, info_mock, unlink_mock, cache_mock, clean_mock):
self.config(manage_tftp=False, group='agent')
info_mock.return_value = {'label': ['fake1', 'fake2']}
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
agent._clean_up_pxe(task)
self.assertFalse(info_mock.called)
self.assertFalse(unlink_mock.called)
self.assertFalse(cache_mock.called)
self.assertFalse(clean_mock.called)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports',
autospec=True)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.create_cleaning_ports',
autospec=True)
@mock.patch('ironic.drivers.modules.agent._do_pxe_boot', autospec=True)
@mock.patch('ironic.drivers.modules.agent._prepare_pxe_boot',
autospec=True)
def test_prepare_cleaning(self, prepare_mock, boot_mock, create_mock,
delete_mock):
ports = [{'ports': self.ports}]
create_mock.return_value = ports
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.assertEqual(states.CLEANWAIT,
self.driver.prepare_cleaning(task))
prepare_mock.assert_called_once_with(task)
boot_mock.assert_called_once_with(task, ports)
create_mock.assert_called_once_with(mock.ANY, task)
delete_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(task.node.driver_internal_info.get(
'agent_erase_devices_iterations'), 1)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports',
autospec=True)
@mock.patch('ironic.drivers.modules.agent._clean_up_pxe', autospec=True)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_tear_down_cleaning(self, power_mock, cleanup_mock, neutron_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.assertIsNone(self.driver.tear_down_cleaning(task))
power_mock.assert_called_once_with(task, states.POWER_OFF)
cleanup_mock.assert_called_once_with(task)
neutron_mock.assert_called_once_with(mock.ANY, task)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps(self, mock_get_clean_steps):
# Test getting clean steps
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(task)
self.assertEqual(mock_steps, steps)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps_config_priority(self, mock_get_clean_steps):
# Test that we can override the priority of get clean steps
# Use 0 because it is an edge case (false-y) and used in devstack
self.config(agent_erase_devices_priority=0, group='agent')
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
expected_steps = [{'priority': 0, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(task)
self.assertEqual(expected_steps, steps)
class TestAgentVendor(db_base.DbTestCase):
def setUp(self):
super(TestAgentVendor, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.passthru = agent.AgentVendorInterface()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
def test_continue_deploy(self):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
'urls': [test_temp_url],
'id': 'fake-image',
'checksum': 'checksum',
'disk_format': 'qcow2',
'container_format': 'bare',
}
client_mock = mock.MagicMock(spec_set=['prepare_image'])
self.passthru._client = client_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_deploy(task)
client_mock.prepare_image.assert_called_with(task.node,
expected_image_info)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
def test_continue_deploy_image_source_is_url(self):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
'urls': [test_temp_url],
'id': self.node.instance_info['image_source'],
'checksum': 'checksum',
'disk_format': 'qcow2',
'container_format': 'bare',
}
client_mock = mock.MagicMock(spec_set=['prepare_image'])
self.passthru._client = client_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_deploy(task)
client_mock.prepare_image.assert_called_with(task.node,
expected_image_info)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch('ironic.conductor.utils.node_set_boot_device', autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(agent, '_clean_up_pxe', autospec=True)
def _test_reboot_to_instance(self, clean_pxe_mock, check_deploy_mock,
bootdev_mock, power_off_mock,
node_power_action_mock,
get_power_state_mock,
uses_pxe=True):
check_deploy_mock.return_value = None
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
self.passthru.reboot_to_instance(task)
if uses_pxe:
clean_pxe_mock.assert_called_once_with(task)
else:
self.assertFalse(clean_pxe_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
bootdev_mock.assert_called_once_with(task, 'disk', persistent=True)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.POWER_ON)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
def test_reboot_to_instance_fake_driver(self, get_power_state_mock):
self._test_reboot_to_instance(
get_power_state_mock=get_power_state_mock)
@mock.patch.object(ipmitool.IPMIPower, 'get_power_state',
spec=types.FunctionType)
def test_reboot_to_instance_agent_ipmitool_driver(
self, get_power_state_mock):
mgr_utils.mock_the_extension_manager(driver='agent_ipmitool')
self.node.driver = 'agent_ipmitool'
self.node.save()
self._test_reboot_to_instance(
get_power_state_mock=get_power_state_mock)
@mock.patch.object(ilo_power.IloPower, 'get_power_state',
spec=types.FunctionType)
def test_reboot_to_instance_agent_ilo_driver(self, get_power_state_mock):
mgr_utils.mock_the_extension_manager(driver='agent_ilo')
self.node.driver = 'agent_ilo'
self.node.save()
self._test_reboot_to_instance(
get_power_state_mock=get_power_state_mock, uses_pxe=False)
@mock.patch.object(irmc_power.IRMCPower, 'get_power_state',
spec=types.FunctionType)
def test_reboot_to_instance_agent_irmc_driver(self, get_power_state_mock):
irmc_deploy._check_share_fs_mounted_patcher.start()
mgr_utils.mock_the_extension_manager(driver='agent_irmc')
self.node.driver = 'agent_irmc'
self.node.save()
self._test_reboot_to_instance(
get_power_state_mock=get_power_state_mock, uses_pxe=False)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = []
self.assertFalse(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_is_done(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'SUCCESS'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_did_start(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_multiple_commands(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'cache_image',
'command_status': 'SUCCESS'},
{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_other_commands(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'cache_image',
'command_status': 'SUCCESS'}]
self.assertFalse(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'SUCCESS'}]
self.assertTrue(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_empty_response(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = []
self.assertFalse(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_race(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'some_other_command',
'command_status': 'SUCCESS'}]
self.assertFalse(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_still_running(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertFalse(self.passthru.deploy_is_done(task))
def _build_pxe_config_options(self, root_device_hints=False):
self.config(api_url='api-url', group='conductor')
self.config(agent_pxe_append_params='foo bar', group='agent')
if root_device_hints:
self.node.properties['root_device'] = {'model': 'FakeModel'}
pxe_info = {
'deploy_kernel': ('glance://deploy-kernel',
'fake-node/deploy_kernel'),
'deploy_ramdisk': ('glance://deploy-ramdisk',
'fake-node/deploy_ramdisk'),
}
options = agent._build_pxe_config_options(self.node, pxe_info)
expected = {'deployment_aki_path': 'fake-node/deploy_kernel',
'deployment_ari_path': 'fake-node/deploy_ramdisk',
'ipa-api-url': 'api-url',
'ipa-driver-name': u'fake_agent',
'coreos.configdrive': 0,
'pxe_append_params': 'foo bar'}
if root_device_hints:
expected['root_device'] = 'model=FakeModel'
self.assertEqual(expected, options)
def test__build_pxe_config_options(self):
self._build_pxe_config_options()
def test__build_pxe_config_options_root_device_hints(self):
self._build_pxe_config_options(root_device_hints=True)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import pickle
import sys
import mock
import pytest # type: ignore
from google.auth import _helpers
from google.auth import exceptions
from google.oauth2 import _credentials_async as _credentials_async
from google.oauth2 import credentials
from tests.oauth2 import test_credentials
class TestCredentials:
TOKEN_URI = "https://example.com/oauth2/token"
REFRESH_TOKEN = "refresh_token"
CLIENT_ID = "client_id"
CLIENT_SECRET = "client_secret"
@classmethod
def make_credentials(cls):
return _credentials_async.Credentials(
token=None,
refresh_token=cls.REFRESH_TOKEN,
token_uri=cls.TOKEN_URI,
client_id=cls.CLIENT_ID,
client_secret=cls.CLIENT_SECRET,
enable_reauth_refresh=True,
)
def test_default_state(self):
credentials = self.make_credentials()
assert not credentials.valid
# Expiration hasn't been set yet
assert not credentials.expired
# Scopes aren't required for these credentials
assert not credentials.requires_scopes
# Test properties
assert credentials.refresh_token == self.REFRESH_TOKEN
assert credentials.token_uri == self.TOKEN_URI
assert credentials.client_id == self.CLIENT_ID
assert credentials.client_secret == self.CLIENT_SECRET
@mock.patch("google.oauth2._reauth_async.refresh_grant", autospec=True)
@mock.patch(
"google.auth._helpers.utcnow",
return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
)
@pytest.mark.asyncio
async def test_refresh_success(self, unused_utcnow, refresh_grant):
token = "token"
expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
grant_response = {"id_token": mock.sentinel.id_token}
rapt_token = "rapt_token"
refresh_grant.return_value = (
# Access token
token,
# New refresh token
None,
# Expiry,
expiry,
# Extra data
grant_response,
# Rapt token
rapt_token,
)
request = mock.AsyncMock(spec=["transport.Request"])
creds = self.make_credentials()
# Refresh credentials
await creds.refresh(request)
# Check jwt grant call.
refresh_grant.assert_called_with(
request,
self.TOKEN_URI,
self.REFRESH_TOKEN,
self.CLIENT_ID,
self.CLIENT_SECRET,
None,
None,
True,
)
# Check that the credentials have the token and expiry
assert creds.token == token
assert creds.expiry == expiry
assert creds.id_token == mock.sentinel.id_token
assert creds.rapt_token == rapt_token
# Check that the credentials are valid (have a token and are not
# expired)
assert creds.valid
@pytest.mark.asyncio
async def test_refresh_no_refresh_token(self):
request = mock.AsyncMock(spec=["transport.Request"])
credentials_ = _credentials_async.Credentials(token=None, refresh_token=None)
with pytest.raises(exceptions.RefreshError, match="necessary fields"):
await credentials_.refresh(request)
request.assert_not_called()
@mock.patch("google.oauth2._reauth_async.refresh_grant", autospec=True)
@mock.patch(
"google.auth._helpers.utcnow",
return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
)
@pytest.mark.asyncio
async def test_credentials_with_scopes_requested_refresh_success(
self, unused_utcnow, refresh_grant
):
scopes = ["email", "profile"]
token = "token"
expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
grant_response = {"id_token": mock.sentinel.id_token}
rapt_token = "rapt_token"
refresh_grant.return_value = (
# Access token
token,
# New refresh token
None,
# Expiry,
expiry,
# Extra data
grant_response,
# Rapt token
rapt_token,
)
request = mock.AsyncMock(spec=["transport.Request"])
creds = _credentials_async.Credentials(
token=None,
refresh_token=self.REFRESH_TOKEN,
token_uri=self.TOKEN_URI,
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
scopes=scopes,
rapt_token="old_rapt_token",
)
# Refresh credentials
await creds.refresh(request)
# Check jwt grant call.
refresh_grant.assert_called_with(
request,
self.TOKEN_URI,
self.REFRESH_TOKEN,
self.CLIENT_ID,
self.CLIENT_SECRET,
scopes,
"old_rapt_token",
False,
)
# Check that the credentials have the token and expiry
assert creds.token == token
assert creds.expiry == expiry
assert creds.id_token == mock.sentinel.id_token
assert creds.has_scopes(scopes)
assert creds.rapt_token == rapt_token
# Check that the credentials are valid (have a token and are not
# expired.)
assert creds.valid
@mock.patch("google.oauth2._reauth_async.refresh_grant", autospec=True)
@mock.patch(
"google.auth._helpers.utcnow",
return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
)
@pytest.mark.asyncio
async def test_credentials_with_scopes_returned_refresh_success(
self, unused_utcnow, refresh_grant
):
scopes = ["email", "profile"]
token = "token"
expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
grant_response = {"id_token": mock.sentinel.id_token, "scope": " ".join(scopes)}
rapt_token = "rapt_token"
refresh_grant.return_value = (
# Access token
token,
# New refresh token
None,
# Expiry,
expiry,
# Extra data
grant_response,
# Rapt token
rapt_token,
)
request = mock.AsyncMock(spec=["transport.Request"])
creds = _credentials_async.Credentials(
token=None,
refresh_token=self.REFRESH_TOKEN,
token_uri=self.TOKEN_URI,
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
scopes=scopes,
)
# Refresh credentials
await creds.refresh(request)
# Check jwt grant call.
refresh_grant.assert_called_with(
request,
self.TOKEN_URI,
self.REFRESH_TOKEN,
self.CLIENT_ID,
self.CLIENT_SECRET,
scopes,
None,
False,
)
# Check that the credentials have the token and expiry
assert creds.token == token
assert creds.expiry == expiry
assert creds.id_token == mock.sentinel.id_token
assert creds.has_scopes(scopes)
assert creds.rapt_token == rapt_token
# Check that the credentials are valid (have a token and are not
# expired.)
assert creds.valid
@mock.patch("google.oauth2._reauth_async.refresh_grant", autospec=True)
@mock.patch(
"google.auth._helpers.utcnow",
return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
)
@pytest.mark.asyncio
async def test_credentials_with_scopes_refresh_failure_raises_refresh_error(
self, unused_utcnow, refresh_grant
):
scopes = ["email", "profile"]
scopes_returned = ["email"]
token = "token"
expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
grant_response = {
"id_token": mock.sentinel.id_token,
"scope": " ".join(scopes_returned),
}
rapt_token = "rapt_token"
refresh_grant.return_value = (
# Access token
token,
# New refresh token
None,
# Expiry,
expiry,
# Extra data
grant_response,
# Rapt token
rapt_token,
)
request = mock.AsyncMock(spec=["transport.Request"])
creds = _credentials_async.Credentials(
token=None,
refresh_token=self.REFRESH_TOKEN,
token_uri=self.TOKEN_URI,
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
scopes=scopes,
rapt_token=None,
)
# Refresh credentials
with pytest.raises(
exceptions.RefreshError, match="Not all requested scopes were granted"
):
await creds.refresh(request)
# Check jwt grant call.
refresh_grant.assert_called_with(
request,
self.TOKEN_URI,
self.REFRESH_TOKEN,
self.CLIENT_ID,
self.CLIENT_SECRET,
scopes,
None,
False,
)
# Check that the credentials have the token and expiry
assert creds.token == token
assert creds.expiry == expiry
assert creds.id_token == mock.sentinel.id_token
assert creds.has_scopes(scopes)
# Check that the credentials are valid (have a token and are not
# expired.)
assert creds.valid
def test_apply_with_quota_project_id(self):
creds = _credentials_async.Credentials(
token="token",
refresh_token=self.REFRESH_TOKEN,
token_uri=self.TOKEN_URI,
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
quota_project_id="quota-project-123",
)
headers = {}
creds.apply(headers)
assert headers["x-goog-user-project"] == "quota-project-123"
def test_apply_with_no_quota_project_id(self):
creds = _credentials_async.Credentials(
token="token",
refresh_token=self.REFRESH_TOKEN,
token_uri=self.TOKEN_URI,
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
)
headers = {}
creds.apply(headers)
assert "x-goog-user-project" not in headers
def test_with_quota_project(self):
creds = _credentials_async.Credentials(
token="token",
refresh_token=self.REFRESH_TOKEN,
token_uri=self.TOKEN_URI,
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
quota_project_id="quota-project-123",
)
new_creds = creds.with_quota_project("new-project-456")
assert new_creds.quota_project_id == "new-project-456"
headers = {}
creds.apply(headers)
assert "x-goog-user-project" in headers
def test_from_authorized_user_info(self):
info = test_credentials.AUTH_USER_INFO.copy()
creds = _credentials_async.Credentials.from_authorized_user_info(info)
assert creds.client_secret == info["client_secret"]
assert creds.client_id == info["client_id"]
assert creds.refresh_token == info["refresh_token"]
assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
assert creds.scopes is None
scopes = ["email", "profile"]
creds = _credentials_async.Credentials.from_authorized_user_info(info, scopes)
assert creds.client_secret == info["client_secret"]
assert creds.client_id == info["client_id"]
assert creds.refresh_token == info["refresh_token"]
assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
assert creds.scopes == scopes
def test_from_authorized_user_file(self):
info = test_credentials.AUTH_USER_INFO.copy()
creds = _credentials_async.Credentials.from_authorized_user_file(
test_credentials.AUTH_USER_JSON_FILE
)
assert creds.client_secret == info["client_secret"]
assert creds.client_id == info["client_id"]
assert creds.refresh_token == info["refresh_token"]
assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
assert creds.scopes is None
scopes = ["email", "profile"]
creds = _credentials_async.Credentials.from_authorized_user_file(
test_credentials.AUTH_USER_JSON_FILE, scopes
)
assert creds.client_secret == info["client_secret"]
assert creds.client_id == info["client_id"]
assert creds.refresh_token == info["refresh_token"]
assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
assert creds.scopes == scopes
def test_to_json(self):
info = test_credentials.AUTH_USER_INFO.copy()
creds = _credentials_async.Credentials.from_authorized_user_info(info)
# Test with no `strip` arg
json_output = creds.to_json()
json_asdict = json.loads(json_output)
assert json_asdict.get("token") == creds.token
assert json_asdict.get("refresh_token") == creds.refresh_token
assert json_asdict.get("token_uri") == creds.token_uri
assert json_asdict.get("client_id") == creds.client_id
assert json_asdict.get("scopes") == creds.scopes
assert json_asdict.get("client_secret") == creds.client_secret
# Test with a `strip` arg
json_output = creds.to_json(strip=["client_secret"])
json_asdict = json.loads(json_output)
assert json_asdict.get("token") == creds.token
assert json_asdict.get("refresh_token") == creds.refresh_token
assert json_asdict.get("token_uri") == creds.token_uri
assert json_asdict.get("client_id") == creds.client_id
assert json_asdict.get("scopes") == creds.scopes
assert json_asdict.get("client_secret") is None
def test_pickle_and_unpickle(self):
creds = self.make_credentials()
unpickled = pickle.loads(pickle.dumps(creds))
# make sure attributes aren't lost during pickling
        assert sorted(creds.__dict__) == sorted(unpickled.__dict__)
for attr in list(creds.__dict__):
assert getattr(creds, attr) == getattr(unpickled, attr)
def test_pickle_with_missing_attribute(self):
creds = self.make_credentials()
# remove an optional attribute before pickling
# this mimics a pickle created with a previous class definition with
# fewer attributes
del creds.__dict__["_quota_project_id"]
unpickled = pickle.loads(pickle.dumps(creds))
# Attribute should be initialized by `__setstate__`
assert unpickled.quota_project_id is None
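    # For reference, the compatibility pattern exercised above is roughly (a sketch,
    # not the actual google-auth implementation):
    #
    #     def __setstate__(self, d):
    #         self.__dict__.update(d)
    #         # attributes introduced in newer versions may be absent in old pickles
    #         self._quota_project_id = d.get("_quota_project_id")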
# pickles are not compatible across versions
@pytest.mark.skipif(
sys.version_info < (3, 5),
reason="pickle file can only be loaded with Python >= 3.5",
)
def test_unpickle_old_credentials_pickle(self):
# make sure a credentials file pickled with an older
# library version (google-auth==1.5.1) can be unpickled
with open(
os.path.join(test_credentials.DATA_DIR, "old_oauth_credentials_py3.pickle"),
"rb",
) as f:
credentials = pickle.load(f)
assert credentials.quota_project_id is None
class TestUserAccessTokenCredentials(object):
def test_instance(self):
cred = _credentials_async.UserAccessTokenCredentials()
assert cred._account is None
cred = cred.with_account("account")
assert cred._account == "account"
@mock.patch("google.auth._cloud_sdk.get_auth_access_token", autospec=True)
def test_refresh(self, get_auth_access_token):
get_auth_access_token.return_value = "access_token"
cred = _credentials_async.UserAccessTokenCredentials()
cred.refresh(None)
assert cred.token == "access_token"
def test_with_quota_project(self):
cred = _credentials_async.UserAccessTokenCredentials()
quota_project_cred = cred.with_quota_project("project-foo")
assert quota_project_cred._quota_project_id == "project-foo"
assert quota_project_cred._account == cred._account
@mock.patch(
"google.oauth2._credentials_async.UserAccessTokenCredentials.apply",
autospec=True,
)
@mock.patch(
"google.oauth2._credentials_async.UserAccessTokenCredentials.refresh",
autospec=True,
)
def test_before_request(self, refresh, apply):
cred = _credentials_async.UserAccessTokenCredentials()
cred.before_request(mock.Mock(), "GET", "https://example.com", {})
refresh.assert_called()
apply.assert_called()
|
|
"""Testing backup functionality
"""
import unittest
import tarfile
import os.path as pth
from itertools import chain
import gludb.config
from gludb.simple import DBObject, Field
from gludb.backup import Backup, is_backup_class, backup_name, strip_line
# Note that we expect our s3server.py mock server to be running, which will
# automatically create the bucket BACKUP_BUCKET_NAME
from utils import compare_data_objects, S3_DIR, BACKUP_BUCKET_NAME
@DBObject(table_name='SimpleTest')
class SimpleData(object):
name = Field('default name')
descrip = Field()
age = Field(42)
@DBObject(table_name='SetupTest')
class ComplexData(object):
name = Field('')
complex_data = Field(dict)
@DBObject(table_name='InheritedTest')
class InheritedData(SimpleData, ComplexData):
only_inherited = Field(42)
def no_blanks(t):
return list(filter(None, t))
def extract_backup(bucketname, keyname):
"""Return a dict of (table-name, JSON-list) from the specified backup"""
filename = pth.join(S3_DIR, bucketname, keyname)
print("Opening backup archive %s" % filename)
backup_dict = dict()
with tarfile.open(filename, mode="r:gz") as backup:
for member in backup:
file = backup.extractfile(member)
data = no_blanks([strip_line(i) for i in file.readlines()])
backup_dict[member.name] = data
file.close()
return backup_dict
def extract_one(backup_dict, cls):
"""Given the dict from extract_backup and a class, return a list of objects
from the backup"""
name = backup_name(cls) + '.json' # Don't forget file extension
return [cls.from_data(i) for i in backup_dict[name]]
class BackupPlumbingTesting(unittest.TestCase):
"""Test basic functions and helpers in the backup module"""
def setUp(self):
gludb.config.default_database(gludb.config.Database(
'sqlite',
filename=':memory:'
))
def tearDown(self):
# Undo any database setup
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def test_backup_obj_check(self):
class NadaClass(object):
pass
def somefunc():
pass
@DBObject(table_name='Tiny')
class TinyBackup(object):
f = Field('')
self.assertFalse(is_backup_class(None))
self.assertFalse(is_backup_class(''))
self.assertFalse(is_backup_class(42))
self.assertFalse(is_backup_class([]))
self.assertFalse(is_backup_class(NadaClass))
self.assertFalse(is_backup_class(somefunc))
self.assertFalse(is_backup_class(NadaClass))
self.assertFalse(is_backup_class(somefunc))
self.assertTrue(is_backup_class(TinyBackup))
self.assertTrue(is_backup_class(SimpleData))
self.assertTrue(is_backup_class(ComplexData))
class BackupRunTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database(
'sqlite',
filename=':memory:'
))
SimpleData.ensure_table()
ComplexData.ensure_table()
InheritedData.ensure_table()
self.backup = Backup(
aws_access_key='testing',
aws_secret_key='testing',
bucketname=BACKUP_BUCKET_NAME
)
def tearDown(self):
# Undo any database setup
print('\n'.join(self.backup.backup_log))
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def assertObjListsEq(self, lst1, lst2):
def key(obj):
return getattr(obj, 'id', '')
for o1, o2 in zip(sorted(lst1, key=key), sorted(lst2, key=key)):
self.assertObjEq(o1, o2)
def test_simple_backup(self):
simple = [
SimpleData(name='Name', descrip='Descrip', age=i)
for i in range(7)
]
complex = [
ComplexData(name='Name'+str(i), complex_data={'a': i})
for i in range(7)
]
for obj in chain(simple, complex):
obj.save()
        self.assertEqual(
            1,
            self.backup.add_class(SimpleData, include_bases=False)
        )
        self.assertEqual(
            1,
            self.backup.add_class(ComplexData, include_bases=False)
        )
        self.assertEqual(2, len(self.backup.classes))
bucketname, keyname = self.backup.run_backup()
backups = extract_backup(bucketname, keyname)
self.assertObjListsEq(simple, extract_one(backups, SimpleData))
self.assertObjListsEq(complex, extract_one(backups, ComplexData))
def test_include_bases_backup(self):
simple = [
SimpleData(name='Name', descrip='Descrip', age=i)
for i in range(7)
]
complex = [
ComplexData(name='Name'+str(i), complex_data={'a': i})
for i in range(7)
]
inherited = [InheritedData(only_inherited=i) for i in range(7)]
for obj in chain(simple, complex, inherited):
obj.save()
        self.assertEqual(
            3,
            self.backup.add_class(InheritedData, include_bases=True)
        )
        self.assertEqual(3, len(self.backup.classes))
bucketname, keyname = self.backup.run_backup()
backups = extract_backup(bucketname, keyname)
self.assertObjListsEq(simple, extract_one(backups, SimpleData))
self.assertObjListsEq(complex, extract_one(backups, ComplexData))
self.assertObjListsEq(inherited, extract_one(backups, InheritedData))
def test_include_package(self):
from testpkg.module import TopData
from testpkg.subpkg1.module import MidData1
from testpkg.subpkg2.module import MidData2
from testpkg.subpkg1.subsubpkg.module import BottomData
expected_dict = dict()
for cls in [TopData, MidData1, MidData2, BottomData]:
cls.ensure_table()
data = [cls(name='Name'+str(i)) for i in range(7)]
for d in data:
d.save()
expected_dict[backup_name(cls)] = data
self.backup.add_package("testpkg")
        self.assertEqual(4, len(self.backup.classes))
bucketname, keyname = self.backup.run_backup()
backups = extract_backup(bucketname, keyname)
for cls in [TopData, MidData1, MidData2, BottomData]:
expected = expected_dict[backup_name(cls)]
self.assertObjListsEq(expected, extract_one(backups, cls))
|
|
import os, csv, re, difflib, math, sys, time, json, sqlite3, plistlib
from pprint import pprint
reload(sys)
sys.setdefaultencoding('utf-8')
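# Python 2-only workaround: force the default string encoding to UTF-8 so the
# str-typed rows coming out of sqlite3 (text_factory = str below) can carry
# non-ASCII stop and route names without UnicodeDecodeError.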
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
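# dict_factory is installed as the sqlite3 row_factory below, so every fetched row
# comes back as a plain dict keyed by column name (e.g. row['route_id']) instead
# of a positional tuple.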
# parse options from command line
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--db",dest="db",help="Source database")
parser.add_option("-o", "--output-prefix", dest="output_prefix")
kwargs, inputs = parser.parse_args()
if not kwargs.db:
parser.error("-d <dbname> - Source db is required")
if not kwargs.output_prefix:
output_prefix = kwargs.db.replace(".db","")
else:
output_prefix = kwargs.output_prefix
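# Example usage (illustrative; the script file name is an assumption):
#   python export_routes.py -d gvb.db -o gvb
# This reads the GTFS tables from gvb.db and writes json/gvb_routes.json,
# json/gvb_stop_routes_map.json (and .plist), plus per-route files under
# json/stops/ and json/shapes/.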
# setup the database
db_name = kwargs.db #'cxx.db'
dbconn = sqlite3.connect(db_name)
dbconn.text_factory = str
dbconn.row_factory = dict_factory
dbcursor = dbconn.cursor()
# math utils
from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
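# Quick sanity check for the helper above (approximate, illustrative coordinates):
# Amsterdam Centraal (52.3791, 4.9003) to Dam Square (52.3731, 4.8936) is a bit
# under a kilometre, so the call below should return roughly 0.8.
#   haversine(4.9003, 52.3791, 4.8936, 52.3731)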
def ll_dist( pt1, pt2 ):
lat1, lon1 = pt1
lat2, lon2 = pt2
return haversine( lon1, lat1, lon2, lat2 )
def point_in_amsterdam( pt ):
city_center = (52.37022,4.89517)
    return ll_dist( city_center, pt) <= 5  # within a 5 km radius of the city center
# stop utils
def _get_all_top_trips():
sql = """
SELECT
t.trip_id,
t.shape_id,
t.route_id as route_id,
st.stop_id,
count(*) AS cnt
FROM trips t
LEFT JOIN stop_times st
ON st.trip_id = t.trip_id
--WHERE t.route_id=r.route_id
GROUP BY st.trip_id
--GROUP BY t.route_id
ORDER BY cnt DESC
LIMIT 10
"""
lst = dbcursor.execute(sql)
trips = lst.fetchall()
pprint( trips )
# sys.exit(-1)
# return lst.fetchone()
def _get_top_trip(route_id):
sql = """
SELECT
t.trip_id,
t.shape_id,
count(*) AS cnt
FROM trips t
LEFT JOIN stop_times st
ON st.trip_id = t.trip_id
    WHERE t.route_id=?
GROUP BY st.trip_id
ORDER BY cnt DESC;
"""
    lst = dbcursor.execute(sql, (route_id,))
# pprint( lst.fetchall() )
# sys.exit(-1)
return lst.fetchone()
def _get_all_stops():
sql = """
SELECT
*
FROM stops s
"""
lst = dbcursor.execute(sql)
stops = lst.fetchall()
stops_hash = { stop['stop_id'] : stop for stop in stops }
#stops.sort(key=lambda x:x['stop_sequence'])
return stops_hash
def _get_trip_stops(trip_id):
sql = """
SELECT
st.stop_id,
st.stop_sequence,
s.stop_lat,
s.stop_lon,
s.stop_name,
s.parent_station as parent_id
FROM stop_times st
LEFT JOIN stops s
ON s.stop_id = st.stop_id
WHERE st.trip_id=?
"""
lst = dbcursor.execute(sql,(trip_id,))
stops = lst.fetchall()
stops.sort(key=lambda x:x['stop_sequence'])
return stops
"""
This method returns the stop lat/lon replaced with the parent's lat-lon
and also adds the parent_id for easy retrieval or something like that!
"""
def _get_trip_stops_real(trip_id):
if not hasattr(_get_trip_stops_real,'_all_stops'):
_get_trip_stops_real._all_stops = _get_all_stops()
_all_stops = _get_trip_stops_real._all_stops
_trip_stops = _get_trip_stops(trip_id)
# fetch the actual stop
# and replace the lat, lon & the name to parent one
for each_stop in _trip_stops:
actual_stop_id = each_stop['parent_id']
if actual_stop_id and len(actual_stop_id) > 0:
actual_stop = _all_stops[ actual_stop_id ]
# copy values from the parent to the stop for data file purposes
each_stop['stop_lat'] = actual_stop['stop_lat']
each_stop['stop_lon'] = actual_stop['stop_lon']
each_stop['stop_name'] = actual_stop['stop_name']
return _trip_stops
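# Illustration (hypothetical GTFS ids): a platform stop such as
#   {'stop_id': '2324a', 'parent_id': '2324', 'stop_lat': ..., 'stop_name': ...}
# comes back carrying the lat/lon and name of parent station '2324', while its own
# stop_id and parent_id are left untouched.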
def _get_shape(shape_id):
sql = """
SELECT
s.shape_pt_lat AS lat,
s.shape_pt_lon AS lon,
s.shape_pt_sequence,
s.shape_dist_traveled AS dist
FROM
shapes s
WHERE
s.shape_id = ?
"""
lst = dbcursor.execute(sql,(shape_id,))
shapepts = lst.fetchall()
shapepts.sort(key=lambda x: x['shape_pt_sequence'])
return shapepts
def _get_routes():
sql = """
SELECT
route_id,
route_short_name AS short_name,
route_long_name AS long_name,
route_type AS route_type
FROM routes
"""
lst = dbcursor.execute(sql)
routes = lst.fetchall()
    routes.sort(key=lambda x: int(re.findall(r'(\d+)', x['route_id'])[0]))
return routes
def _write_to_file(fname,content):
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
fh = open(fname,'w')
fh.write(content)
fh.close()
def _write_json(fname,obj):
_write_to_file(fname,json.dumps(obj,indent=4))
def _write_plist(fname,obj):
plistlib.writePlist(obj,fname)
# print point_in_amsterdam( (52.37022,4.89517) )
# sys.exit(-1)
if __name__ == '__main__':
allroutes = _get_routes()
# pprint( [ (e['route_id'], e['short_name']) for e in allroutes ])
# sys.exit(-1)
# pprint(allroutes)
# sys.exit(-1)
allstops = _get_all_stops()
stop_to_parent_map = { e['stop_id'] : e['parent_station'] for e in allstops.values() if len(e['parent_station']) > 0 }
# _write_json( 'json/%s_routes.json' % output_prefix, allroutes )
stop_routes_map = {}
valid_routes = []
# for each route
for i, route in enumerate(allroutes):
route_id = route['route_id']
print '%3d' % i, route_id
# get route_id -> trip_id -> all stops
trip = _get_top_trip(route_id)
if trip is not None:
trip_id = trip['trip_id']
else:
print 'skipping route(%s) trip(%s)' %( route_id, trip )
continue
"""First calculate shape information"""
shape_id = trip['shape_id']
trip_shape = _get_shape(shape_id)
trip_shape_min = [ (e['lat'],e['lon']) for e in trip_shape ]
shape_points_in_ams = map( point_in_amsterdam, trip_shape_min )
if sum(shape_points_in_ams) == 0:
print 'skipping route(%s) - outside amsterdam' %( route_id )
continue
"""Calculate trip stops information"""
trip_stops = _get_trip_stops_real(trip_id)
for each_trip_stop in trip_stops:
each_trip_stop_id = each_trip_stop['stop_id']
            if each_trip_stop_id not in stop_routes_map:
stop_routes_map[ each_trip_stop_id ] = []
stop_routes_map[ each_trip_stop_id ].append( route_id )
# pprint(stop_routes_map)
# sys.exit(-1)
valid_routes.append( route )
route_fname = route_id.replace('|','_')
_write_json( 'json/stops/%s.json' % route_fname, trip_stops )
_write_json( 'json/shapes/%s.json' % route_fname, trip_shape_min )
_write_json( 'json/%s_routes.json' % output_prefix, valid_routes )
_write_json('json/%s_stop_routes_map.json' % output_prefix, stop_routes_map )
_write_plist('json/%s_stop_routes_map.plist' % output_prefix, stop_routes_map )
|