# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-rasp:
Deploy the Pretrained Model on Raspberry Pi
===========================================
**Author**: `Ziheng Jiang <https://ziheng.org/>`_, \
`Hiroyuki Makino <https://makihiro.github.io/>`_
This is an example of using Relay to compile a ResNet model and deploy
it on Raspberry Pi.
"""
import tvm
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import util, graph_runtime as runtime
from tvm.contrib.download import download_testdata
######################################################################
# .. _build-tvm-runtime-on-device:
#
# Build TVM Runtime on Device
# ---------------------------
#
# The first step is to build the TVM runtime on the remote device.
#
# .. note::
#
#   All instructions in both this section and the next should be
#   executed on the target device, e.g. a Raspberry Pi, and we assume
#   the device is running Linux.
#
# Since we do the compilation on the local machine, the remote device is only
# used for running the generated code, so we only need to build the TVM runtime
# on the remote device.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/dmlc/tvm
# cd tvm
# mkdir build
# cp cmake/config.cmake build
# cd build
# cmake ..
# make runtime -j4
#
# After building the runtime successfully, we need to set environment variables
# in the :code:`~/.bashrc` file. We can edit :code:`~/.bashrc`
# using :code:`vi ~/.bashrc` and add the line below (assuming your TVM
# directory is in :code:`~/tvm`):
#
# .. code-block:: bash
#
# export PYTHONPATH=$PYTHONPATH:~/tvm/python
#
# To update the environment variables, execute :code:`source ~/.bashrc`.
######################################################################
# Set Up RPC Server on Device
# ---------------------------
# To start an RPC server, run the following command on your remote device
# (which is the Raspberry Pi in our example).
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090
#
# If you see the line below, it means the RPC server started
# successfully on your device.
#
# .. code-block:: bash
#
# INFO:root:RPCServer: bind to 0.0.0.0:9090
#
######################################################################
# Prepare the Pre-trained Model
# -----------------------------
# Back on the host machine, which should have a full TVM installation (with LLVM).
#
# We will use a pre-trained model from the
# `MXNet Gluon model zoo <https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html>`_.
# You can find more details about this part in the tutorial :ref:`tutorial-from-mxnet`.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
import numpy as np
# one line to get the model
block = get_model('resnet18_v1', pretrained=True)
######################################################################
# In order to test our model, here we download an image of a cat and
# transform its format.
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_name = 'cat.png'
img_path = download_testdata(img_url, img_name, module='data')
image = Image.open(img_path).resize((224, 224))
def transform_image(image):
image = np.array(image) - np.array([123., 117., 104.])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
######################################################################
# The synset is used to transform the label from an ImageNet class index to
# a word humans can understand.
synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
'4d0b62f3d01426887599d4f7ede23ee5/raw/',
'596b27d23537e5a1b5751d2b0481ef172f58b539/',
'imagenet1000_clsid_to_human.txt'])
synset_name = 'imagenet1000_clsid_to_human.txt'
synset_path = download_testdata(synset_url, synset_name, module='data')
with open(synset_path) as f:
synset = eval(f.read())
######################################################################
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.
# We support MXNet static graphs (symbol) and HybridBlocks in mxnet.gluon.
shape_dict = {'data': x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
# we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# Here are some basic data workload configurations.
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
######################################################################
# Compile The Graph
# -----------------
# To compile the graph, we call the :any:`relay.build` function
# with the graph configuration and parameters. However, you cannot
# deploy an x86 program on a device with an ARM instruction set. This means
# Relay also needs to know the compilation target of the remote device,
# apart from the arguments :code:`net` and :code:`params` that specify the
# deep learning workload. The choice of target matters: different options
# will lead to very different performance.
######################################################################
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Raspberry Pi, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device.
local_demo = True
if local_demo:
target = tvm.target.create('llvm')
else:
target = tvm.target.arm_cpu('rasp3b')
# The above line is a simple form of
# target = tvm.target.create('llvm -device=arm_cpu -model=bcm2837 -target=armv7l-linux-gnueabihf -mattr=+neon')
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=params)
# After `relay.build`, you will get three return values: the graph,
# the library and the new parameters, since we do some optimization that
# changes the parameters while keeping the result of the model the same.
# Save the library at local temporary directory.
tmp = util.tempdir()
lib_fname = tmp.relpath('net.tar')
lib.export_library(lib_fname)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote device.
# obtain an RPC session from remote device.
if local_demo:
remote = rpc.LocalSession()
else:
# The following is my environment, change this to the IP address of your target device
host = '10.77.1.162'
port = 9090
remote = rpc.connect(host, port)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module('net.tar')
# create the remote runtime module
ctx = remote.cpu(0)
module = runtime.create(graph, rlib, ctx)
# set parameter (upload params to the remote device. This may take a while)
module.set_input(**params)
# set input data
module.set_input('data', tvm.nd.array(x.astype('float32')))
# run
module.run()
# get output
out = module.get_output(0)
# get top1 result
top1 = np.argmax(out.asnumpy())
print('TVM prediction top-1: {}'.format(synset[top1]))
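######################################################################
# As an optional sanity check, we can also time the remote inference.
# This is a minimal sketch (not part of the original tutorial) that assumes
# the graph runtime module exposes the standard :code:`time_evaluator` API;
# the number of repetitions is an arbitrary choice.
ftimer = module.module.time_evaluator('run', ctx, number=10, repeat=3)
prof_res = np.array(ftimer().results) * 1000  # per-repeat mean, in milliseconds
print('Mean inference time: %.2f ms (std: %.2f ms)' % (np.mean(prof_res), np.std(prof_res)))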
# -*- coding: utf-8 -*-
import time, re, HTMLParser
import hf, lxml, logging, datetime
from sqlalchemy import *
from lxml import etree
class dCacheMoverInfo(hf.module.ModuleBase):
config_keys = {
'watch_jobs': ('Comma separated list of the jobs to watch on the pools', ''),
'pool_match_regex': ('Watch only pools that match the given regular expression', 'rT_cms$'),
'critical_queue_threshold': ('Job is bad if the number of queued tasks exceeds the threshold', '6'),
'source': ('Download command for the qstat XML file', ''),
}
config_hint = ''
table_columns = [
Column('critical_queue_threshold', TEXT),
], []
subtable_columns = {
"summary": ([
Column('job', TEXT),
Column('active', INT),
Column('max', INT),
Column('queued', INT),
], []),
"info": ([
Column('pool', TEXT),
Column('domain', TEXT),
Column('job', TEXT),
Column('active', INT),
Column('max', INT),
Column('queued', INT),
], []),
}
def prepareAcquisition(self):
self.watch_jobs = self.config['watch_jobs'].split(',')
self.pool_match_regex = self.config['pool_match_regex']
self.critical_queue_threshold = self.config['critical_queue_threshold']
if 'source' not in self.config: raise hf.exceptions.ConfigError('source option not set')
self.source = hf.downloadService.addDownload(self.config['source'])
self.job_info_db_value_list = []
self.job_summary_db_value_list = []
def extractData(self):
data = {'critical_queue_threshold':self.critical_queue_threshold}
data['source_url'] = self.source.getSourceUrl()
class TableRowExtractor(HTMLParser.HTMLParser):
'''
Parse the HTML and extract all rows from the table.
The format is a list of rows, each row is a list with format [th?, class, data],
saved in the extractedRows attribute
'''
extractedRows = []
__currentRow = []
__curTag = ''
def handle_starttag(self, tag, attr):
self.__curTag = tag
if tag == "tr":
self.__currentRow = []
elif tag == 'td' or tag == 'th':
cssClass = ''
for a in attr:
if a[0] == 'class': cssClass = a[1]
self.__currentRow.append([tag == 'th', cssClass, ''])
def handle_endtag(self, tag):
if tag == "tr":
self.extractedRows.append(self.__currentRow)
self.__currentRow = []
def handle_data(self, data):
if data == '\n' or data == '\r\n' or data == '':
return
if self.__curTag == 'td' or self.__curTag == 'th':
self.__currentRow[len(self.__currentRow)-1][2] = data
def extractPools(rows):
'''
This applies 'filters' to the row-data to discard headlines and totals
and to extract only the pools that are interesting for us.
The return value is a dictionary of pools: the key is the pool name, the value
is a tuple (domain, value-dict). The value-dict contains the data: the key
is the transfer-type, the value is a triple (cur, max, queue)
'''
protocols = []
# extract all protocols (they are in the first row, starting at the third column)
for p in rows[0][2:]:
protocols.append(p[2])
pools = {}
for r in rows:
# Discard empty rows
if len(r) == 0: continue
# Discard all rows starting with a head
if r[0][0]: continue
# We now have data-rows only.
name, domain = r[0][2], r[1][2]
# Only CMS read-tape pools
if not re.search(self.pool_match_regex, name):
#print 'Discard', name
continue
values = {}
for i,proto in enumerate(protocols):
values[proto] = (int(r[i*3+2][2]), int(r[i*3+3][2]), int(r[i*3+4][2]))
pools[name] = (domain, values)
return pools
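# Illustrative shape of the return value (hypothetical pool, domain and job
# names with made-up counts, not real data):
#   {'pool_01_rT_cms': ('dcache-domain-01', {'mover': (3, 100, 0),
#                                            'p2p': (0, 10, 2)})}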
# now actually import the data
tableRowExtractor = TableRowExtractor()
for line in open(self.source.getTmpPath(), 'r'):
tableRowExtractor.feed(line)
pool_list = extractPools(tableRowExtractor.extractedRows)
num_queuing_pools = 0
has_critical_queue = False
job_transfers_sum = {} # calculate sums over all pools
for pool,value in pool_list.iteritems():
job_has_queue = False
# Add all the job-values that interest us to the database as a new row per job
for job in self.watch_jobs:
if not job in job_transfers_sum: job_transfers_sum[job] = [0, 0, 0]
job_info_db_values = {}
job_info_db_values['pool'] = pool
job_info_db_values['domain'] = value[0]
job_info_db_values['job'] = job
job_info_db_values['active'] = int(value[1][job][0])
job_info_db_values['max'] = int(value[1][job][1])
job_info_db_values['queued'] = int(value[1][job][2])
self.job_info_db_value_list.append(job_info_db_values)
job_transfers_sum[job][0] += job_info_db_values['active']
job_transfers_sum[job][1] += job_info_db_values['max']
job_transfers_sum[job][2] += job_info_db_values['queued']
queued = job_info_db_values['queued']
if queued > 0:
    job_has_queue = True
if queued > int(self.critical_queue_threshold):
    has_critical_queue = True
if job_has_queue:
num_queuing_pools += 1
# calculate happiness as ratio of queued pools to total pools,
# be sad if there is a critical queue
data['status'] = 1.0 - float(num_queuing_pools) / len(pool_list)
if has_critical_queue: data['status'] = 0.0
self.job_summary_db_value_list = [{'job':job, 'active':v[0], 'max':v[1], 'queued':v[2]} for job,v in job_transfers_sum.iteritems()]
return data
def fillSubtables(self, parent_id):
self.subtables['info'].insert().execute([dict(parent_id=parent_id, **row) for row in self.job_info_db_value_list])
self.subtables['summary'].insert().execute([dict(parent_id=parent_id, **row) for row in self.job_summary_db_value_list])
def getTemplateData(self):
data = hf.module.ModuleBase.getTemplateData(self)
info_list = self.subtables['info'].select().where(self.subtables['info'].c.parent_id==self.dataset['id']).execute().fetchall()
info_list = map(dict, info_list)
summary_list = self.subtables['summary'].select().where(self.subtables['summary'].c.parent_id==self.dataset['id']).execute().fetchall()
summary_list = map(dict, summary_list)
for i,group in enumerate(summary_list):
if group['queued'] >= int(self.dataset['critical_queue_threshold']):
group['status'] = 'critical'
elif group['queued'] > 0:
group['status'] = 'warning'
else:
group['status'] = 'ok'
for i,group in enumerate(info_list):
if group['queued'] >= int(self.dataset['critical_queue_threshold']):
group['status'] = 'critical'
elif group['queued'] > 0:
group['status'] = 'warning'
else:
group['status'] = 'ok'
data['summary_list'] = summary_list
poollist = []
for i,group in enumerate(info_list):
if not(group['pool'] in poollist):
poollist.append(group['pool'])
details_list = {}
for i,group in enumerate(poollist):
appending = {group:[]}
details_list.update(appending)
for i,group in enumerate(info_list):
details_list[group['pool']].append(group)
data['details_list'] = details_list
return data
import random
import arcade
import math
import numpy
# Adapted from this tutorial:
# http://gamedevelopment.tutsplus.com/tutorials/how-to-create-a-custom-2d-physics-engine-the-basics-and-impulse-resolution--gamedev-6331
# The tutorial has a lot of mistakes in the code, which is kind of annoying.
# Don't use this type of engine on a platformer
# http://higherorderfun.com/blog/2012/05/20/the-guide-to-implementing-2d-platformers/
class PhysicsObject:
def __init__(self, position, velocity, restitution, mass):
self.velocity = velocity
self.restitution = restitution
self.mass = mass
self.position = position # Vector
def _get_x(self):
return self.position[0]
x = property(_get_x)
def _get_y(self):
return self.position[1]
y = property(_get_y)
class Circle(PhysicsObject):
def __init__(self, position, velocity, restitution, mass, radius, color):
super().__init__(position, velocity, restitution, mass)
self.radius = radius
self.color = color
def draw(self):
arcade.draw_circle_filled(self.position[0], self.position[1],
self.radius, self.color)
class AABB(PhysicsObject):
def __init__(self, rect, velocity, restitution, mass, color):
super().__init__([rect[0], rect[1]], velocity, restitution, mass)
self.color = color
self.width = rect[2]
self.height = rect[3]
def draw(self):
arcade.draw_rect_filled(self.position[0], self.position[1], self.width,
self.height, self.color)
def _get_min(self):
    # Return the min corner as a plain [x, y] list (no Vector class is
    # defined in this module and positions are stored as lists).
    return [self.position[0], self.position[1]]
min = property(_get_min)
def _get_max(self):
    return [self.position[0] + self.width,
            self.position[1] + self.height]
max = property(_get_max)
def distanceA(a, b):
return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
class Manifold:
def __init__(self, a, b, penetration, normal):
self.a = a
self.b = b
self.penetration = penetration
self.normal = normal
def __str__(self):
return "Penetration: {}, Normal: {}".format(self.penetration,
self.normal)
def circle_vs_circle(m):
n = numpy.subtract(m.b.position.data, m.a.position.data)
r = m.a.radius + m.b.radius
r *= r
if r < (m.a.x - m.b.x) ** 2 + (m.a.y - m.b.y) ** 2:
return False
d = distanceA(m.a.position.data, m.b.position.data)
if d != 0:
# Penetration is the difference between the sum of the radii and the distance
m.penetration = (m.a.radius + m.b.radius) - d
# Utilize our d since we performed sqrt on it already within Length( )
# Points from A to B, and is a unit vector
m.normal = n / d
return True
else:
# Choose random (but consistent) values
m.penetration = m.a.radius
m.normal = [1, 0]
return True
def resolve_collision(m):
# Calculate relative velocity
rv = numpy.subtract(m.b.velocity, m.a.velocity)
# Calculate relative velocity in terms of the normal direction
velocity_along_normal = numpy.dot(rv, m.normal)
# Do not resolve if velocities are separating
if velocity_along_normal > 0:
# print("Separating:", velocity_along_normal)
# print(" Normal: ", m.normal)
# print(" Vel: ", m.b.velocity, m.a.velocity)
return False
# Calculate restitution
e = min(m.a.restitution, m.b.restitution)
# Calculate impulse scalar
j = -(1 + e) * velocity_along_normal
j /= 1 / m.a.mass + 1 / m.b.mass
# Apply impulse
impulse = numpy.multiply(j, m.normal)
# print("Before: ", m.a.velocity, m.b.velocity)
m.a.velocity = numpy.subtract(m.a.velocity,
numpy.multiply(1 / m.a.mass, impulse))
m.b.velocity = numpy.add(m.b.velocity,
numpy.multiply(1 / m.b.mass, impulse))
# print("After: ", m.a.velocity, m.b.velocity)
# print(" Normal: ", m.normal)
return True
def aabb_vs_aabb(m):
n = numpy.subtract(m.b.position, m.a.position)
abox = m.a
bbox = m.b
a_extent = (abox.width) / 2
b_extent = (bbox.width) / 2
x_overlap = a_extent + b_extent - abs(n[0])
if x_overlap > 0:
# Calculate half extents along x axis for each object
a_extent = (abox.height) / 2
b_extent = (bbox.height) / 2
# Calculate overlap on y axis
y_overlap = a_extent + b_extent - abs(n[1])
# SAT test on y axis
if y_overlap > 0:
# Find out which axis is axis of least penetration
if x_overlap < y_overlap:
# Point towards B knowing that n points from A to B
if n[0] < 0: # n.x
m.normal = [-1, 0]
else:
m.normal = [1, 0]
m.penetration = x_overlap
return True
else:
# Point toward B knowing that n points from A to B
if n[1] < 0: # n.y
m.normal = [0, -1]
else:
m.normal = [0, 1]
m.penetration = y_overlap
return True
return False
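# For illustration (hypothetical boxes): two AABBs of width 2 whose positions
# differ by 1.5 along x and 0 along y overlap by 0.5 on the x axis and fully
# on the y axis, so the axis of least penetration is x, the normal becomes
# [1, 0] (or [-1, 0]) and the penetration is 0.5.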
def clamp(a, min_value, max_value):
return max(min(a, max_value), min_value)
def magnitude(v):
return math.sqrt(sum(v[i]*v[i] for i in range(len(v))))
def add(u, v):
return [u[i]+v[i] for i in range(len(u))]
def sub(u, v):
return [u[i]-v[i] for i in range(len(u))]
def neg(u):
return [-u[i] for i in range(len(u))]
def dot(u, v):
return sum(u[i]*v[i] for i in range(len(u)))
def normalize(v):
vmag = magnitude(v)
return [v[i]/vmag for i in range(len(v))]
def aabb_vs_circle(m):
x_extent = m.a.width / 2
y_extent = m.a.height / 2
a_center = [m.a.x + x_extent, m.a.y - y_extent]
b_center = m.b.position
closestX = clamp(b_center[0],
m.a.position[0], m.a.position[0] + m.a.width)
closestY = clamp(b_center[1],
m.a.position[1] - m.a.height, m.a.position[1])
# Calculate the distance between the circle's center and this closest point
distanceX = b_center[0] - closestX
distanceY = b_center[1] - closestY
# If the distance is less than the circle's radius, an intersection occurs
distanceSquared = (distanceX * distanceX) + (distanceY * distanceY)
collision = distanceSquared < (m.b.radius * m.b.radius)
if not collision:
return False
# print("Bang")
d = distanceA(a_center, b_center)
# print(d)
if d == 0:
# Choose random (but consistent) values
m.penetration = m.b.radius
m.normal = [1, 0]
return True
else:
m.normal = neg(normalize(sub(a_center, b_center)))
return collision
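# A minimal usage sketch (not from the original tutorial): two equal-mass
# circles approaching head-on along the x axis. The Manifold values (normal
# and penetration) are assumed here rather than computed, so only the impulse
# resolution step is exercised.
if __name__ == "__main__":
    a = Circle([0.0, 0.0], [1.0, 0.0], restitution=0.8, mass=1.0, radius=1.0, color=None)
    b = Circle([1.5, 0.0], [-1.0, 0.0], restitution=0.8, mass=1.0, radius=1.0, color=None)
    m = Manifold(a, b, penetration=0.5, normal=[1.0, 0.0])
    resolve_collision(m)
    # With equal masses and e = 0.8, the circles swap (scaled) velocities.
    print(a.velocity, b.velocity)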
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
from mxnet.test_utils import *
from mxnet.base import MXNetError
import numpy as np
import os, gzip
import pickle as pickle
import time
try:
import h5py
except ImportError:
h5py = None
import sys
from common import assertRaises
import unittest
def test_MNISTIter():
# prepare data
get_mnist_ubyte()
batch_size = 100
train_dataiter = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data_shape=(784,),
batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)
# test_loop
nbatch = 60000 / batch_size
batch_count = 0
for batch in train_dataiter:
batch_count += 1
assert(nbatch == batch_count)
# test_reset
train_dataiter.reset()
train_dataiter.iter_next()
label_0 = train_dataiter.getlabel().asnumpy().flatten()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.reset()
train_dataiter.iter_next()
label_1 = train_dataiter.getlabel().asnumpy().flatten()
assert(sum(label_0 - label_1) == 0)
def test_Cifar10Rec():
get_cifar10()
dataiter = mx.io.ImageRecordIter(
path_imgrec="data/cifar/train.rec",
mean_img="data/cifar/cifar10_mean.bin",
rand_crop=False,
rand_mirror=False,
shuffle=False,
data_shape=(3,28,28),
batch_size=100,
preprocess_threads=4,
prefetch_buffer=1)
labelcount = [0 for i in range(10)]
batchcount = 0
for batch in dataiter:
npdata = batch.data[0].asnumpy().flatten().sum()
sys.stdout.flush()
batchcount += 1
nplabel = batch.label[0].asnumpy()
for i in range(nplabel.shape[0]):
labelcount[int(nplabel[i])] += 1
for i in range(10):
assert(labelcount[i] == 5000)
def test_NDArrayIter():
data = np.ones([1000, 2, 2])
label = np.ones([1000, 1])
for i in range(1000):
data[i] = i / 100
label[i] = i / 100
dataiter = mx.io.NDArrayIter(data, label, 128, True, last_batch_handle='pad')
batchidx = 0
for batch in dataiter:
batchidx += 1
assert(batchidx == 8)
dataiter = mx.io.NDArrayIter(data, label, 128, False, last_batch_handle='pad')
batchidx = 0
labelcount = [0 for i in range(10)]
for batch in dataiter:
label = batch.label[0].asnumpy().flatten()
assert((batch.data[0].asnumpy()[:,0,0] == label).all())
for i in range(label.shape[0]):
labelcount[int(label[i])] += 1
for i in range(10):
if i == 0:
assert(labelcount[i] == 124)
else:
assert(labelcount[i] == 100)
def test_NDArrayIter_h5py():
if not h5py:
return
data = np.ones([1000, 2, 2])
label = np.ones([1000, 1])
for i in range(1000):
data[i] = i / 100
label[i] = i / 100
try:
os.remove("ndarraytest.h5")
except OSError:
pass
with h5py.File("ndarraytest.h5") as f:
f.create_dataset("data", data=data)
f.create_dataset("label", data=label)
dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, True, last_batch_handle='pad')
batchidx = 0
for batch in dataiter:
batchidx += 1
assert(batchidx == 8)
dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, False, last_batch_handle='pad')
labelcount = [0 for i in range(10)]
for batch in dataiter:
label = batch.label[0].asnumpy().flatten()
assert((batch.data[0].asnumpy()[:,0,0] == label).all())
for i in range(label.shape[0]):
labelcount[int(label[i])] += 1
try:
os.remove("ndarraytest.h5")
except OSError:
pass
for i in range(10):
if i == 0:
assert(labelcount[i] == 124)
else:
assert(labelcount[i] == 100)
def test_NDArrayIter_csr():
# creating toy data
num_rows = rnd.randint(5, 15)
num_cols = rnd.randint(1, 20)
batch_size = rnd.randint(1, num_rows)
shape = (num_rows, num_cols)
csr, _ = rand_sparse_ndarray(shape, 'csr')
dns = csr.asnumpy()
# CSRNDArray or scipy.sparse.csr_matrix with last_batch_handle not equal to 'discard' will throw NotImplementedError
assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': csr}, dns, batch_size)
try:
import scipy.sparse as spsp
train_data = spsp.csr_matrix(dns)
assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': train_data}, dns, batch_size)
except ImportError:
pass
# CSRNDArray with shuffle
csr_iter = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, dns, batch_size,
shuffle=True, last_batch_handle='discard'))
num_batch = 0
for batch in csr_iter:
num_batch += 1
assert(num_batch == num_rows // batch_size)
# make iterators
csr_iter = iter(mx.io.NDArrayIter(csr, csr, batch_size, last_batch_handle='discard'))
begin = 0
for batch in csr_iter:
expected = np.zeros((batch_size, num_cols))
end = begin + batch_size
expected[:num_rows - begin] = dns[begin:end]
if end > num_rows:
expected[num_rows - begin:] = dns[0:end - num_rows]
assert_almost_equal(batch.data[0].asnumpy(), expected)
begin += batch_size
def test_LibSVMIter():
def check_libSVMIter_synthetic():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
with open(data_path, 'w') as fout:
fout.write('1.0 0:0.5 2:1.2\n')
fout.write('-2.0\n')
fout.write('-3.0 0:0.6 1:2.4 2:1.2\n')
fout.write('4 2:-1.2\n')
with open(label_path, 'w') as fout:
fout.write('1.0\n')
fout.write('-2.0 0:0.125\n')
fout.write('-3.0 2:1.2\n')
fout.write('4 1:1.0 2:-1.2\n')
data_dir = os.path.join(cwd, 'data')
data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
data_shape=(3, ), label_shape=(3, ), batch_size=3)
first = mx.nd.array([[ 0.5, 0., 1.2], [ 0., 0., 0.], [ 0.6, 2.4, 1.2]])
second = mx.nd.array([[ 0., 0., -1.2], [ 0.5, 0., 1.2], [ 0., 0., 0.]])
i = 0
for batch in iter(data_train):
expected = first.asnumpy() if i == 0 else second.asnumpy()
data = data_train.getdata()
data.check_format(True)
assert_almost_equal(data.asnumpy(), expected)
i += 1
def check_libSVMIter_news_data():
news_metadata = {
'name': 'news20.t',
'origin_name': 'news20.t.bz2',
'url': "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/news20.t.bz2",
'feature_dim': 62060 + 1,
'num_classes': 20,
'num_examples': 3993,
}
batch_size = 33
num_examples = news_metadata['num_examples']
data_dir = os.path.join(os.getcwd(), 'data')
get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],
news_metadata['origin_name'])
path = os.path.join(data_dir, news_metadata['name'])
data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),
batch_size=batch_size)
for epoch in range(2):
num_batches = 0
for batch in data_train:
# check the range of labels
data = batch.data[0]
label = batch.label[0]
data.check_format(True)
assert(np.sum(label.asnumpy() > 20) == 0)
assert(np.sum(label.asnumpy() <= 0) == 0)
num_batches += 1
expected_num_batches = num_examples / batch_size
assert(num_batches == int(expected_num_batches)), num_batches
data_train.reset()
def check_libSVMIter_exception():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
with open(data_path, 'w') as fout:
fout.write('1.0 0:0.5 2:1.2\n')
fout.write('-2.0\n')
# The line below has a negative index and should throw an exception
fout.write('-3.0 -1:0.6 1:2.4 2:1.2\n')
fout.write('4 2:-1.2\n')
with open(label_path, 'w') as fout:
fout.write('1.0\n')
fout.write('-2.0 0:0.125\n')
fout.write('-3.0 2:1.2\n')
fout.write('4 1:1.0 2:-1.2\n')
data_dir = os.path.join(cwd, 'data')
data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
data_shape=(3, ), label_shape=(3, ), batch_size=3)
for batch in iter(data_train):
data_train.get_data().asnumpy()
check_libSVMIter_synthetic()
check_libSVMIter_news_data()
assertRaises(MXNetError, check_libSVMIter_exception)
def test_DataBatch():
from nose.tools import ok_
from mxnet.io import DataBatch
import re
batch = DataBatch(data=[mx.nd.ones((2,3))])
ok_(re.match('DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch)))
batch = DataBatch(data=[mx.nd.ones((2,3)), mx.nd.ones((7,8))], label=[mx.nd.ones((4,5))])
ok_(re.match('DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch)))
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7826")
def test_CSVIter():
def check_CSVIter_synthetic():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
with open(data_path, 'w') as fout:
for i in range(1000):
fout.write(','.join(['1' for _ in range(8*8)]) + '\n')
with open(label_path, 'w') as fout:
for i in range(1000):
fout.write('0\n')
data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8,8),
label_csv=label_path, batch_size=100)
expected = mx.nd.ones((100, 8, 8))
for batch in iter(data_train):
assert_almost_equal(data_train.getdata().asnumpy(), expected.asnumpy())
check_CSVIter_synthetic()
if __name__ == "__main__":
test_NDArrayIter()
if h5py:
test_NDArrayIter_h5py()
test_MNISTIter()
test_Cifar10Rec()
test_LibSVMIter()
test_NDArrayIter_csr()
test_CSVIter()
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
from qiime2.core.util import tuplize
from qiime2.core.type.collection import List, Set
from qiime2.core.type.primitive import Int, Float, Bool, Str
from qiime2.core.type.grammar import UnionExp, _ExpBase, IntersectionExp
from qiime2.core.type.parse import ast_to_type
def _strip_predicates(expr):
if isinstance(expr, UnionExp):
return UnionExp(_strip_predicates(m) for m in expr.members).normalize()
if hasattr(expr, 'fields'):
new_fields = tuple(_strip_predicates(f) for f in expr.fields)
return expr.duplicate(fields=new_fields, predicate=IntersectionExp())
def val_to_bool(value):
if type(value) is bool:
return value
elif str(value).lower() == 'true':
return True
elif str(value).lower() == 'false':
return False
else:
raise ValueError('Could not cast to bool')
def val_to_int(v):
type_ = type(v)
if type_ is int:
return v
elif type_ is str:
return int(v)
else:
raise ValueError('Could not cast to int')
def val_to_float(v):
type_ = type(v)
if type_ is float:
return v
elif type_ is str:
return float(v)
else:
raise ValueError('Could not cast to float')
VariadicRecord = collections.namedtuple('VariadicRecord', ['pytype', 'q2type'])
_VARIADIC = {
'List': VariadicRecord(pytype=list, q2type=List),
'Set': VariadicRecord(pytype=set, q2type=Set),
}
CoercionRecord = collections.namedtuple('CoercionRecord', ['func', 'pytype'])
# Beware visitor, order matters in this here mapper
_COERCION_MAPPER = {
Int: CoercionRecord(pytype=int, func=val_to_int),
Float: CoercionRecord(pytype=float, func=val_to_float),
Bool: CoercionRecord(pytype=bool, func=val_to_bool),
Str: CoercionRecord(pytype=str, func=str),
}
_COERCE_ERROR = ValueError(
'Could not coerce value based on expression provided.')
CollectionStyle = collections.namedtuple(
'CollectionStyle', ['style', 'members', 'view', 'expr', 'base'])
def _norm_input(t):
if type(t) is dict:
return ast_to_type(t)
elif not isinstance(t, _ExpBase):
raise TypeError("%r is not a QIIME 2 type" % (t,))
return t
def is_qiime_type(t):
try:
_norm_input(t)
except Exception:
return False
else:
return True
def is_primitive_type(t):
expr = _norm_input(t)
return hasattr(expr, 'kind') and expr.kind == 'primitive'
def is_metadata_type(t):
expr = _norm_input(t)
return is_primitive_type(t) and expr.name.startswith('Metadata')
def is_metadata_column_type(t):
expr = _norm_input(t)
return is_primitive_type(t) and expr.name.endswith('MetadataColumn')
def is_semantic_type(t):
expr = _norm_input(t)
return hasattr(expr, 'kind') and expr.kind == 'semantic-type'
def is_visualization_type(t):
expr = _norm_input(t)
return hasattr(expr, 'kind') and expr.kind == 'visualization'
def is_union(t):
expr = _norm_input(t)
return isinstance(expr, UnionExp)
def is_collection_type(t):
expr = _norm_input(t)
if expr.name in _VARIADIC:
return True
if is_union(expr):
for m in expr.members:
if m.name in _VARIADIC:
return True
return False
def interrogate_collection_type(t):
expr = _norm_input(t)
style = None # simple, monomorphic, composite, complex
members = None # T , [T1, T2] , [T1, T2], [[T1], [T2, T3]]
view = None # set, list
base = None
if expr.name in _VARIADIC:
view, base = _VARIADIC[expr.name]
field, = expr.fields
if isinstance(field, UnionExp):
style = 'composite'
members = list(field.members)
else:
style = 'simple'
members = field
elif isinstance(expr, UnionExp):
if expr.members[0].name in _VARIADIC:
members = []
for member in expr.members:
field, = member.fields
if isinstance(field, UnionExp):
style = 'complex'
members.append(list(field.members))
else:
members.append([field])
if style != 'complex':
style = 'monomorphic'
# use last iteration
view, base = _VARIADIC[member.name]
if style == 'monomorphic':
members = [m[0] for m in members]
return CollectionStyle(style=style, members=members, view=view,
expr=expr, base=base)
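# For illustration (hypothetical inputs built from the primitives imported
# above):
#   List[Int]            -> style='simple',      members=Int,        view=list
#   List[Int | Str]      -> style='composite',   members=[Int, Str], view=list
#   List[Int] | Set[Int] -> style='monomorphic', members=[Int, Int], view=set
#   Int                  -> style=None,          members=None,       view=None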
def _ordered_coercion(types):
types = tuple(types)
return tuple(k for k in _COERCION_MAPPER.keys() if k in types)
def _interrogate_types(allowed, value):
ordered_allowed = _ordered_coercion(allowed)
for coerce_type in (_COERCION_MAPPER[x].func for x in ordered_allowed):
try:
return coerce_type(value)
except ValueError:
pass
raise _COERCE_ERROR
def parse_primitive(t, value):
expr = _norm_input(t)
result = []
allowed = None
homogeneous = True
if is_metadata_type(expr):
raise ValueError('%r may not be parsed with this util.' % (expr,))
expr = _strip_predicates(expr)
collection_style = interrogate_collection_type(expr)
if collection_style.style in ('simple', 'monomorphic', 'composite'):
allowed = list(collection_style.members)
if collection_style.style == 'composite':
homogeneous = False
elif collection_style.style == 'complex':
# Sort here so that we can start with any simple lists in the memberset
for subexpr in sorted(collection_style.members, key=len):
expr = collection_style.base[UnionExp(subexpr)]
try:
return parse_primitive(expr, value)
except ValueError:
pass
raise _COERCE_ERROR
elif collection_style.style is None:
value = tuplize(value)
if expr in (Int, Float, Bool, Str):
# No sense in walking over all options when we know
# what it should be
allowed = [expr]
else:
allowed = list(_COERCION_MAPPER.keys())
else:
pass
assert allowed is not None
# Int <= Float, make sure it's added in
if Float in allowed and Int not in allowed:
allowed.append(Int)
for v in value:
result.append(_interrogate_types(allowed, v))
# Some exprs require homogeneous values, make it so
if homogeneous:
all_matching = False
for member in allowed:
if all(type(x) == _COERCION_MAPPER[member].pytype
for x in result):
all_matching = True
break
if not all_matching and collection_style.style == 'monomorphic':
for subexpr in allowed:
expr = collection_style.base[subexpr]
try:
return parse_primitive(expr, value)
except ValueError:
pass
raise _COERCE_ERROR
if collection_style.view is None:
return result[0]
else:
return collection_style.view(result)
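# A minimal usage sketch of parse_primitive() (hypothetical values):
#   parse_primitive(Int, '42')              -> 42
#   parse_primitive(Bool, 'false')          -> False
#   parse_primitive(List[Int], ('1', '2'))  -> [1, 2]
# Predicates are stripped by _strip_predicates() before coercion, so they do
# not constrain the coerced values returned here.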
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from pwd import getpwnam
import shutil
import sys
# TODO(rhallisey): add docstring.
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def validate_config(config):
required_keys = {'source', 'dest', 'owner', 'perm'}
if 'command' not in config:
LOG.error('Config is missing required "command" key')
sys.exit(1)
# Validate config sections
for data in config.get('config_files', list()):
# Verify required keys exist. The check below requires 'source',
# 'dest', 'owner' and 'perm' to be present for every config file
# entry
if not data.viewkeys() >= required_keys:
LOG.error('Config is missing required keys: {}'.format(data))
sys.exit(1)
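# For illustration, a config.json of the expected shape (hypothetical paths
# and values, not taken from any real deployment):
#   {
#       "command": "/usr/sbin/httpd -DFOREGROUND",
#       "config_files": [
#           {"source": "/var/lib/kolla/config_files/httpd.conf",
#            "dest": "/etc/httpd/conf/httpd.conf",
#            "owner": "root",
#            "perm": "0600"}
#       ]
#   }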
def validate_source(data):
source = data.get('source')
if not os.path.exists(source):
if data.get('optional'):
LOG.warn('{} does not exist, but is not required'.format(source))
return False
else:
LOG.error('The source to copy does not exist: {}'.format(source))
sys.exit(1)
return True
def copy_files(data):
dest = data.get('dest')
source = data.get('source')
if os.path.exists(dest):
LOG.info('Removing existing destination: {}'.format(dest))
if os.path.isdir(dest):
shutil.rmtree(dest)
else:
os.remove(dest)
if os.path.isdir(source):
source_path = source
dest_path = dest
else:
source_path = os.path.dirname(source)
dest_path = os.path.dirname(dest)
if not os.path.exists(dest_path):
LOG.info('Creating dest parent directory: {}'.format(dest_path))
os.makedirs(dest_path)
if source != source_path:
# Source is file
LOG.info('Copying {} to {}'.format(source, dest))
shutil.copy(source, dest)
else:
# Source is a directory
for src in os.listdir(source_path):
LOG.info('Copying {} to {}'.format(
os.path.join(source_path, src), dest_path))
if os.path.isdir(os.path.join(source_path, src)):
shutil.copytree(os.path.join(source_path, src), dest_path)
else:
shutil.copy(os.path.join(source_path, src), dest_path)
def set_permissions(data):
def set_perms(file_, uid, gid, perm):
LOG.info('Setting permissions for {}'.format(file_))
# Give config file proper perms.
try:
os.chown(file_, uid, gid)
except OSError as e:
LOG.error('While trying to chown {} received error: {}'.format(
file_, e))
sys.exit(1)
try:
os.chmod(file_, perm)
except OSError as e:
LOG.error('While trying to chmod {} received error: {}'.format(
file_, e))
sys.exit(1)
dest = data.get('dest')
owner = data.get('owner')
perm = int(data.get('perm'), 0)
# Check for user and group id in the environment.
try:
uid = getpwnam(owner).pw_uid
except KeyError:
LOG.error('The specified user does not exist: {}'.format(owner))
sys.exit(1)
try:
gid = getpwnam(owner).pw_gid
except KeyError:
LOG.error('The specified group does not exist: {}'.format(owner))
sys.exit(1)
# Set permissions on the top level dir or file
set_perms(dest, uid, gid, perm)
if os.path.isdir(dest):
# Recursively set permissions
for root, dirs, files in os.walk(dest):
for dir_ in dirs:
set_perms(os.path.join(root, dir_), uid, gid, perm)
for file_ in files:
set_perms(os.path.join(root, file_), uid, gid, perm)
def load_config():
config_file = '/var/lib/kolla/config_files/config.json'
LOG.info('Loading config file at {}'.format(config_file))
# Attempt to read config file
with open(config_file) as f:
try:
config = json.load(f)
except ValueError:
LOG.error('Invalid json file found at {}'.format(config_file))
sys.exit(1)
except IOError as e:
LOG.error('Could not read file {}. Failed with error {}'.format(
config_file, e))
sys.exit(1)
LOG.info('Validating config file')
validate_config(config)
if 'config_files' in config:
LOG.info('Copying service configuration files')
for data in config['config_files']:
if validate_source(data):
copy_files(data)
set_permissions(data)
else:
LOG.debug('No files to copy found in config')
LOG.info('Writing out command to execute')
LOG.debug('Command is: {}'.format(config['command']))
# The value from the 'command' key will be written to '/run_command'
with open('/run_command', 'w+') as f:
f.write(config['command'])
def execute_config_strategy():
try:
config_strategy = os.environ["KOLLA_CONFIG_STRATEGY"]
LOG.info('Kolla config strategy set to: {}'.format(config_strategy))
except KeyError:
LOG.error("KOLLA_CONFIG_STRATEGY is not set properly.")
sys.exit(1)
if config_strategy == "COPY_ALWAYS":
load_config()
elif config_strategy == "COPY_ONCE":
if os.path.exists('/configured'):
LOG.info("The config strategy prevents copying new configs")
sys.exit(0)
else:
load_config()
f = open('/configured', 'w+')
f.close()
else:
LOG.error('KOLLA_CONFIG_STRATEGY is not set properly')
sys.exit(1)
def main():
execute_config_strategy()
return 0
if __name__ == "__main__":
sys.exit(main())
###########################################
# VCZ calibration (coarse landscape) FLUX dance 1
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# set CZ parameters
flux_lm_X3.cfg_awg_channel_amplitude(0.28500000000000003)
flux_lm_X3.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_D8.vcz_amp_dac_at_11_02_SW(0)
flux_lm_D6.cfg_awg_channel_amplitude(0.19302332066356387)
flux_lm_D6.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_X2.vcz_amp_dac_at_11_02_NE(0)
flux_lm_X1.cfg_awg_channel_amplitude(0.25166666666666665)
flux_lm_X1.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_D2.vcz_amp_dac_at_11_02_SW(0)
# Set park parameters
flux_lm_D7.cfg_awg_channel_amplitude(.21)
flux_lm_Z4.cfg_awg_channel_amplitude(.19)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D1.cfg_awg_channel_amplitude(.235)
flux_lm_D7.park_amp(.5)
flux_lm_Z4.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D1.park_amp(.5)
flux_lm_D7.park_double_sided(True)
flux_lm_Z4.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D1.park_double_sided(True)
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=['D7', 'Z4', 'Z1', 'D1'])
device.prepare_for_timedomain(qubits=['X3', 'D8', 'D6', 'X2', 'X1', 'D2'])
pairs = [['X3', 'D8'], ['D6', 'X2'], ['X1', 'D2']]
parked_qubits = ['D7', 'Z1', 'Z4', 'D1']
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-1',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [ swf.FLsweep(flux_lm_X3, flux_lm_X3.vcz_amp_sq_NE, 'cz_NE'),
swf.FLsweep(flux_lm_D6, flux_lm_D6.vcz_amp_sq_SW, 'cz_SW'),
swf.FLsweep(flux_lm_X1, flux_lm_X1.vcz_amp_sq_NE, 'cz_NE') ]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X3, flux_lm_D8,
flux_lm_D6, flux_lm_X2,
flux_lm_X1, flux_lm_D2],
which_gate= ['NE', 'SW',
'SW', 'NE',
'NE', 'SW'],
fl_lm_park = [flux_lm_Z1, flux_lm_D7, flux_lm_Z4, flux_lm_D1],
speed_limit = [2.9583333333333334e-08, 2.75e-08, 2.75e-08])
# swf2.set_parameter(5)
# plt.plot(flux_lm_D5._wave_dict['cz_SE'], label='D5')
# plt.plot(flux_lm_X3._wave_dict['cz_NW'], label='X3')
# plt.plot(flux_lm_X2._wave_dict['cz_NW'], label='X2')
# plt.plot(flux_lm_D7._wave_dict['cz_SE'], label='D7')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z1')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z4')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='D8')
# plt.axhline(.5, color='k', ls='--', alpha=.25)
# plt.legend()
# plt.show()
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 21))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 2
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# set CZ parameters
flux_lm_X3.cfg_awg_channel_amplitude(0.3242724012703858)
flux_lm_X3.vcz_amp_dac_at_11_02_NW(.5)
flux_lm_D7.vcz_amp_dac_at_11_02_SE(0)
flux_lm_D5.cfg_awg_channel_amplitude(0.16687470158591108)
flux_lm_D5.vcz_amp_dac_at_11_02_SE(.5)
flux_lm_X2.vcz_amp_dac_at_11_02_NW(0)
flux_lm_X1.cfg_awg_channel_amplitude(0.27975182997855896)
flux_lm_X1.vcz_amp_dac_at_11_02_NW(.5)
flux_lm_D1.vcz_amp_dac_at_11_02_SE(0)
# Set park parameters
flux_lm_D8.cfg_awg_channel_amplitude(.22)
flux_lm_Z4.cfg_awg_channel_amplitude(.19)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D2.cfg_awg_channel_amplitude(.225)
flux_lm_D8.park_amp(.5)
flux_lm_Z4.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D2.park_amp(.5)
flux_lm_D8.park_double_sided(True)
flux_lm_Z4.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D2.park_double_sided(True)
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=['D8', 'Z4', 'Z1', 'D2'])
device.prepare_for_timedomain(qubits=['X3', 'D7', 'D5', 'X2', 'X1', 'D1'])
pairs = [['X3', 'D7'], ['D5', 'X2'], ['X1', 'D1']]
parked_qubits = ['D8', 'Z1', 'Z4', 'D2']
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-2',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [ swf.FLsweep(flux_lm_X3, flux_lm_X3.vcz_amp_sq_NW, 'cz_NW'),
swf.FLsweep(flux_lm_D5, flux_lm_D5.vcz_amp_sq_SE, 'cz_SE'),
swf.FLsweep(flux_lm_X1, flux_lm_X1.vcz_amp_sq_NW, 'cz_NW') ]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X3, flux_lm_D7,
flux_lm_D5, flux_lm_X2,
flux_lm_X1, flux_lm_D1],
which_gate= ['NW', 'SE',
'SE', 'NW',
'NW', 'SE'],
fl_lm_park = [flux_lm_Z1, flux_lm_D8, flux_lm_Z4, flux_lm_D2],
speed_limit = [2.9583333333333334e-08, 2.4166666666666668e-08, 2.5416666666666666e-08])
# swf2.set_parameter(5)
# plt.plot(flux_lm_X4._wave_dict['cz_SE'], label='X4')
# plt.plot(flux_lm_D9._wave_dict['cz_NW'], label='D9')
# plt.plot(flux_lm_D5._wave_dict['cz_NW'], label='D5')
# plt.plot(flux_lm_X3._wave_dict['cz_SE'], label='X3')
# plt.plot(flux_lm_X2._wave_dict['cz_NW'], label='X2')
# plt.plot(flux_lm_D3._wave_dict['cz_SE'], label='D3')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z1')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z4')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='D8')
# plt.axhline(.5, color='k', ls='--', alpha=.25)
# plt.legend()
# plt.show()
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 21))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
coha = ma2.Conditional_Oscillation_Heatmap_Analysis(
label="223142_VCZ_2D_[['X3', 'D7'], ['D5', 'X2'], ['X1', 'D1']]_fine_sweep",
for_multi_CZ = True,
pair = {'pair_name':['X3','D7'],'sweep_ratio':[1.2/3,1],'pair_num':0},
close_figs=True,
extract_only=False,
plt_orig_pnts=True,
plt_contour_L1=False,
plt_contour_phase=True,
plt_optimal_values=True,
plt_optimal_values_max=1,
find_local_optimals=True,
plt_clusters=False,
cluster_from_interp=False,
clims={
"Cost func": [0., 300],
"missing fraction": [0, 30],
"offset difference": [0, 30]
},
target_cond_phase=180,
phase_thr=15,
L1_thr=5,
clustering_thr=0.15,
gen_optima_hulls=True,
hull_L1_thr=4,
hull_phase_thr=20,
plt_optimal_hulls=True,
save_cond_phase_contours=[180],
)
###########################################
# VCZ calibration (coarse landscape) FLUX dance 3
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# set CZ parameters
flux_lm_X4.cfg_awg_channel_amplitude(0.2658333333333333)
flux_lm_X4.vcz_amp_dac_at_11_02_SE(.5)
flux_lm_D9.vcz_amp_dac_at_11_02_NW(0)
flux_lm_D5.cfg_awg_channel_amplitude(0.2)
flux_lm_D5.vcz_amp_dac_at_11_02_NW(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SE(0)
flux_lm_X2.cfg_awg_channel_amplitude(0.316)
flux_lm_X2.vcz_amp_dac_at_11_02_SE(.5)
flux_lm_D3.vcz_amp_dac_at_11_02_NW(0)
# Set park parameters
flux_lm_D8.cfg_awg_channel_amplitude(.22)
flux_lm_Z4.cfg_awg_channel_amplitude(.19)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D2.cfg_awg_channel_amplitude(.225)
flux_lm_D8.park_amp(.5)
flux_lm_Z4.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D2.park_amp(.5)
flux_lm_D8.park_double_sided(True)
flux_lm_Z4.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D2.park_double_sided(True)
# flux-dance 3
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['X4', 'D9'], ['D5', 'X3'], ['X2', 'D3']]
which_gate= [['SE', 'NW'],['NW', 'SE'], ['SE', 'NW']]
parked_qubits = ['D8', 'Z1', 'Z4', 'D2']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-3',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [.5, 1, .2])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [device.find_instrument("flux_lm_{}".format(qubit))\
for qubit in list_qubits_used],
which_gate= which_gates,
fl_lm_park = flux_lms_park,
speed_limit = [2.75e-08, 2.75e-08, 2.75e-8]) # input
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 31))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 4
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# set CZ parameters
flux_lm_X4.cfg_awg_channel_amplitude(0.261)
flux_lm_X4.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D8.vcz_amp_dac_at_11_02_NE(0)
flux_lm_D4.cfg_awg_channel_amplitude(0.201)
flux_lm_D4.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SW(0)
flux_lm_X2.cfg_awg_channel_amplitude(0.31174999999999997)
flux_lm_X2.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D2.vcz_amp_dac_at_11_02_NE(0)
# Set park parameters
flux_lm_D9.cfg_awg_channel_amplitude(.206)
flux_lm_Z3.cfg_awg_channel_amplitude(.214)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D3.cfg_awg_channel_amplitude(.223)
flux_lm_D9.park_amp(.5)
flux_lm_Z3.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D3.park_amp(.5)
flux_lm_D9.park_double_sided(True)
flux_lm_Z3.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D3.park_double_sided(True)
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['X4', 'D8'], ['D4', 'X3'], ['X2', 'D2']]
which_gate= [['SW', 'NE'],['NE', 'SW'], ['SW', 'NE']]
parked_qubits = ['D9', 'Z1', 'Z3', 'D3']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-4',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [.6, 1.8, 1.2/3])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [device.find_instrument("flux_lm_{}".format(qubit))\
for qubit in list_qubits_used],
which_gate= which_gates,
fl_lm_park = flux_lms_park,
speed_limit = [2.75e-08, 2.78e-8,2.75e-08]) # input
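# 2D landscape: swf1 (inner axis) scales the square-pulse amplitude (vcz_amp_sq)
# of each target flux LutMan using the ratios above, while swf2 (outer axis)
# steps the flux-pulse middle time for the listed speed limits.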
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.985, 1.005, 31))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::-1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 4, single pair D4-X3 ('cz' codeword)
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# set CZ parameters
flux_lm_D4.cfg_awg_channel_amplitude(0.201)
flux_lm_D4.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SW(0)
# Set park parameters
flux_lm_Z3.cfg_awg_channel_amplitude(.3)#(.214)
flux_lm_Z1.cfg_awg_channel_amplitude(.3)#(.21)
flux_lm_Z3.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_Z3.park_double_sided(False)
flux_lm_Z1.park_double_sided(False)
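# Quick visual sanity check of the generated CZ and park waveforms; the dashed
# line marks the 0.5 dac amplitude reference.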
plt.plot(flux_lm_D4._wave_dict['cz_NE'], label='D4')
plt.plot(flux_lm_X3._wave_dict['cz_SW'], label='X3')
plt.plot(flux_lm_Z1._wave_dict['park'], label='Z1')
plt.plot(flux_lm_Z3._wave_dict['park'], label='Z3')
plt.axhline(.5, color='k', ls='--', alpha=.25)
plt.legend()
plt.show()
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['D4', 'X3']]
which_gate= [['NE', 'SW']]
parked_qubits = ['Z1', 'Z3']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'wait_time_before_flux_ns': 60,
'wait_time_after_flux_ns': 60,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'cz',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [device.find_instrument("flux_lm_{}".format(qubit))\
for qubit in list_qubits_used],
which_gate= which_gates,
fl_lm_park = flux_lms_park,
speed_limit = [2.78e-8]) # input
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 11))
nested_MC.set_sweep_points_2D([0,1,2,3,4,5,6,7,8,9,10])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 4 (old)
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# set CZ parameters
flux_lm_X4.cfg_awg_channel_amplitude(0.261)
flux_lm_X4.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D8.vcz_amp_dac_at_11_02_NE(0)
flux_lm_D4.cfg_awg_channel_amplitude(0.25999999046325684)
flux_lm_D4.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SW(0)
flux_lm_X2.cfg_awg_channel_amplitude(0.31174999999999997)
flux_lm_X2.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D2.vcz_amp_dac_at_11_02_NE(0)
# Set park parameters
flux_lm_D9.cfg_awg_channel_amplitude(.206)
flux_lm_Z3.cfg_awg_channel_amplitude(.214)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D3.cfg_awg_channel_amplitude(.223)
flux_lm_D9.park_amp(.5)
flux_lm_Z3.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D3.park_amp(.5)
flux_lm_D9.park_double_sided(True)
flux_lm_Z3.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D3.park_double_sided(True)
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['X4', 'D8'], ['D4', 'X3'], ['X2', 'D2']]
which_gate= [['SW', 'NE'],['NE', 'SW'], ['SW', 'NE']]
parked_qubits = ['D9', 'Z1', 'Z3', 'D3']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-4',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [.6, 1.8, 1.2/3])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [device.find_instrument("flux_lm_{}".format(qubit))\
for qubit in list_qubits_used],
which_gate= which_gates,
fl_lm_park = flux_lms_park,
speed_limit = [2.75e-08, 2.78e-8,2.75e-08]) # input
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.985, 1.005, 31))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::-1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
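###########################################
# Refactoring sketch (not executed above): the three coarse-landscape blocks
# share the same measurement structure, so they could be wrapped in one helper.
# Minimal, untested sketch; it assumes the `device`, `MC`, `nested_MC`, `det`,
# `swf`, `cf` and `ma2` objects already in scope and covers only the
# measurement part (the per-qubit amplitude/park settings stay outside).
###########################################
def run_vcz_coarse_landscape(pairs, which_gate, parked_qubits, speed_limits,
                             sweep_point_ratios, flux_codeword='flux-dance-4',
                             amp_points=np.linspace(.985, 1.005, 31),
                             time_points=np.linspace(0, 10, 11)[::-1]):
    """Run one 2D (amplitude ratio x middle time) VCZ landscape measurement."""
    flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))
                       for pair in pairs]
    flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))
                     for qb in parked_qubits]
    list_qubits_used = np.asarray(pairs).flatten().tolist()
    which_gates = np.asarray(which_gate).flatten().tolist()
    device.ro_acq_averages(1024)
    device.ro_acq_digitized(False)
    device.ro_acq_weight_type('optimal')
    device.prepare_fluxing(qubits=parked_qubits)
    device.prepare_for_timedomain(qubits=list_qubits_used)
    conv_cost_det = det.Function_Detector(
        get_function=cf.conventional_CZ_cost_func2,
        msmt_kw={'device': device, 'MC': MC, 'pairs': pairs,
                 'parked_qbs': parked_qubits,
                 'prepare_for_timedomain': False,
                 'disable_metadata': True,
                 'extract_only': True,
                 'flux_codeword': flux_codeword,
                 'parked_qubit_seq': 'ground',
                 'include_single_qubit_phase_in_cost': False,
                 'target_single_qubit_phase': 360,
                 'include_leakage_in_cost': True,
                 'target_phase': 180,
                 'cond_phase_weight_factor': 2},
        value_names=[f'cost_function_val_{pair}' for pair in pairs] +
                    [f'delta_phi_{pair}' for pair in pairs] +
                    [f'missing_fraction_{pair}' for pair in pairs],
        result_keys=[f'cost_function_val_{pair}' for pair in pairs] +
                    [f'delta_phi_{pair}' for pair in pairs] +
                    [f'missing_fraction_{pair}' for pair in pairs],
        value_units=['a.u.'] * len(pairs) + ['deg'] * len(pairs) + ['%'] * len(pairs))
    sw_functions = [swf.FLsweep(flux_lm, flux_lm.parameters['vcz_amp_sq_{}'.format(gate[0])],
                                'cz_{}'.format(gate[0]))
                    for flux_lm, gate in zip(flux_lms_target, which_gate)]
    nested_MC.set_sweep_function(swf.multi_sweep_function(
        sw_functions, sweep_point_ratios=sweep_point_ratios))
    nested_MC.set_sweep_function_2D(swf.flux_t_middle_sweep(
        fl_lm_tm=[device.find_instrument("flux_lm_{}".format(qb))
                  for qb in list_qubits_used],
        which_gate=which_gates,
        fl_lm_park=flux_lms_park,
        speed_limit=speed_limits))
    nested_MC.set_sweep_points(amp_points)
    nested_MC.set_sweep_points_2D(time_points)
    nested_MC.cfg_clipping_mode(True)
    nested_MC.set_detector_function(conv_cost_det)
    label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
    result = nested_MC.run(label, mode='2D')
    try:
        ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
    except Exception:
        print('Failed Analysis')
    return result
# Example call, equivalent to the first block above:
# run_vcz_coarse_landscape(pairs=[['X4', 'D8'], ['D4', 'X3'], ['X2', 'D2']],
#                          which_gate=[['SW', 'NE'], ['NE', 'SW'], ['SW', 'NE']],
#                          parked_qubits=['D9', 'Z1', 'Z3', 'D3'],
#                          speed_limits=[2.75e-08, 2.78e-8, 2.75e-08],
#                          sweep_point_ratios=[.6, 1.8, 1.2/3])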
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autograd import core as ag_core
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
num = 0
if arg.type != types_pb2.DT_INVALID: num += 1
if arg.type_attr: num += 1
if arg.type_list_attr: num += 1
return num
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
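# Example (sketch): _Flatten and _Restructure are used as near-inverses below:
#   _Flatten(["foo", ["bar", "baz"], ["qux"]])    -> ["foo", "bar", "baz", "qux"]
#   _Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
#                                                 -> ("foo", ["bar", "baz"], "qux")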
def _MakeFloat(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def _MakeInt(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def _MakeStr(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(attr_def.name, repr(v)))
i = v.as_datatype_enum
_SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
return i
def _MakeShape(v, arg_name):
"""Convert v into a TensorShapeProto."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# A TensorShapeProto.
if isinstance(v, tensor_shape_pb2.TensorShapeProto):
for d in v.dim:
if d.name:
logging.warning("Warning: TensorShapeProto with a named dimension: %s",
str(v))
break
return v
try:
return tensor_shape.as_shape(v).as_proto()
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
class _OpInfo(object):
"""All per-Op state we would like to precompute/validate."""
def __init__(self, op_def):
self.op_def = op_def
# TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
# here, instead of these checks.
for arg in list(op_def.input_arg) + list(op_def.output_arg):
num_type_fields = _NumTypeFields(arg)
if num_type_fields != 1:
raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
(arg.name, op_def.name, num_type_fields))
if arg.type_attr:
attr_type = _Attr(op_def, arg.type_attr).type
if attr_type != "type":
raise TypeError("Attr '%s' of '%s' used as a type_attr "
"but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.type_list_attr:
attr_type = _Attr(op_def, arg.type_list_attr).type
if attr_type != "list(type)":
raise TypeError(
"Attr '%s' of '%s' used as a type_list_attr but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.number_attr:
attr_type = _Attr(op_def, arg.number_attr).type
if attr_type != "int":
raise TypeError(
"Attr '%s' of '%s' used as a number_attr but has type %s" %
(arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
"""Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
def __init__(self):
self._ops = {}
# pylint: disable=invalid-name
def add_op(self, op_def):
"""Register an OpDef. May call apply_op with the name afterwards."""
if not isinstance(op_def, op_def_pb2.OpDef):
raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
(op_def, type(op_def)))
if not op_def.name:
raise ValueError("%s missing name." % op_def)
if op_def.name in self._ops:
raise RuntimeError("Op name %s registered twice." % op_def.name)
self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
"""Register the OpDefs from an OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
self.add_op(op_def)
def apply_op(self, op_type_name, name=None, **keywords):
# pylint: disable=g-doc-args
"""Add a node invoking a registered Op to a graph.
Example usage:
# input1 and input2 can be Tensors or anything ops.convert_to_tensor()
# will convert to a Tensor.
op_def_library.apply_op("op", input1=input1, input2=input2)
# Can specify a node name.
op_def_library.apply_op("op", input1=input1, name="node_name")
# Must use keyword arguments, with the names specified in the OpDef.
op_def_library.apply_op("op", input_name=input, attr_name=attr)
All attrs must either be inferred from an input or specified.
(If inferred, the attr must not be specified.) If an attr has a default
value specified in the Op's OpDef, then you may pass None as the value
of that attr to get the default.
Args:
op_type_name: string. Must match the name field of a registered Op.
name: string. Optional name of the created op.
**keywords: input Tensor and attr arguments specified by name,
and optional parameters to pass when constructing the Operation.
Returns:
The Tensor(s) representing the output of the operation, or the Operation
itself if there are no outputs.
Raises:
RuntimeError: On some errors.
TypeError: On some errors.
ValueError: On some errors.
"""
output_structure, is_stateful, op = self._apply_op_helper(
op_type_name, name, **keywords)
if output_structure:
outputs = op.outputs
res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
if isinstance(res, list) and not res and is_stateful:
return op
else:
return res
else:
return op
def _apply_op_helper(self, op_type_name, name=None, **keywords):
"""Implementation of apply_op that returns output_structure, op."""
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.internal_convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError("%s that are invalid." % prefix)
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ag_core.getval(values)
values = ops.internal_convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead." %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.internal_convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any([bt != base_types[0] for bt in base_types]):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
assert False, "Unreachable"
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
if isinstance(value, attr_value_pb2.NameAttrList):
attr_value.func.CopyFrom(value)
elif isinstance(value, compat.bytes_or_text_types):
attr_value.func.name = value
else:
value.add_to_graph(ops.get_default_graph())
attr_value.func.name = value.name
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_types = []
output_structure = []
for arg in op_def.output_arg:
types = []
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
if arg.type_attr:
types = [_AttrValue(attr_protos, arg.type_attr).type] * n
else:
types = [arg.type] * n
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
types = [t.type]
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
types = t.list.type
output_structure.append(len(types))
else:
types = [arg.type]
output_structure.append(None)
if arg.is_ref:
types = [dtypes.as_dtype(x)._as_ref for x in types] # pylint: disable=protected-access
output_types.extend(types)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
inputs = [ag_core.getval(x) for x in inputs]
op = g.create_op(op_type_name, inputs, output_types, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
return output_structure, op_def.is_stateful, op
# pylint: enable=invalid-name
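# Usage sketch (illustrative only): `op_list_proto` is assumed to be an
# op_def_pb2.OpList whose entries include an OpDef named "MyOp" with inputs
# `x` and `y`; these names are not part of this module.
#
#   op_def_lib = OpDefLibrary()
#   op_def_lib.add_op_list(op_list_proto)
#   # Inputs and attrs are passed as keyword arguments named after the OpDef
#   # fields. The return value is the output tensor(s), or the Operation
#   # itself if the op has no outputs.
#   out = op_def_lib.apply_op("MyOp", x=a, y=b, name="my_node")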
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class BNTest(test.TestCase):
def testCreateBN(self):
# Call layer.
bn = normalization_layers.BatchNormalization(axis=1)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 4)
self.assertEqual(len(bn.trainable_variables), 2)
self.assertEqual(len(bn.non_trainable_variables), 2)
# Test that updates were created and added to UPDATE_OPS.
self.assertEqual(len(bn.updates), 2)
self.assertListEqual(
ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates)
# Test that weights were created and added to TRAINABLE_VARIABLES.
self.assertListEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
bn.trainable_variables)
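  # Note: the four variables are gamma and beta (trainable) plus moving_mean
  # and moving_variance (non-trainable); the two update ops refresh the moving
  # statistics and are collected in GraphKeys.UPDATE_OPS.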
def test3DInputAxis1(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 4, 1))
np_beta = np.reshape(np_beta, (1, 4, 1))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 2))
std = np.std(np_inputs, axis=(0, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test3DInputAxis2(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=2, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 3))
np_beta = np.reshape(np_beta, (1, 1, 3))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 1))
std = np.std(np_inputs, axis=(0, 1))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis1(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 4, 1, 1))
np_beta = np.reshape(np_beta, (1, 4, 1, 1))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 2, 3))
std = np.std(np_inputs, axis=(0, 2, 3))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis2(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=2, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 3, 1))
np_beta = np.reshape(np_beta, (1, 1, 3, 1))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 3))
std = np.std(np_inputs, axis=(0, 1, 3))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis3(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=3, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testNegativeAxis(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=-1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testBooleanLearningPhase(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=-1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
outputs_training = bn.apply(inputs, training=True)
outputs_infer = bn.apply(inputs, training=False)
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs_training] + bn.updates)
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = sess.run([bn.moving_mean, bn.moving_variance])
np_inputs = sess.run(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs_infer)
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testFunctionalNoReuse(self):
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
epsilon = 1e-3
training = array_ops.placeholder(dtype='bool')
outputs = normalization_layers.batch_norm(
inputs,
axis=-1,
momentum=0.9,
epsilon=epsilon,
training=training,
name='bn')
updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
all_vars = dict([(v.name, v) for v in variables.global_variables()])
moving_mean = all_vars['bn/moving_mean:0']
moving_variance = all_vars['bn/moving_variance:0']
beta = all_vars['bn/beta:0']
gamma = all_vars['bn/gamma:0']
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
np_gamma, np_beta = sess.run([gamma, beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
np_moving_mean, np_moving_var = sess.run([moving_mean, moving_variance])
np_inputs = sess.run(inputs)
np_mean = np.mean(np_inputs, axis=(0, 1, 2))
np_std = np.std(np_inputs, axis=(0, 1, 2))
np_variance = np.square(np_std)
self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
self.assertAllClose(np_variance, np_moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testFunctionalReuse(self):
inputs1 = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
inputs2 = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
epsilon = 1e-3
training = array_ops.placeholder(dtype='bool')
_ = normalization_layers.batch_norm(
inputs1,
axis=-1,
momentum=0.9,
epsilon=epsilon,
training=training,
name='bn')
outputs2 = normalization_layers.batch_norm(
inputs2,
axis=-1,
momentum=0.9,
epsilon=epsilon,
training=training,
name='bn',
reuse=True)
# Last 2 update ops
updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)[-2:]
all_vars = dict([(v.name, v) for v in variables.global_variables()])
moving_mean = all_vars['bn/moving_mean:0']
moving_variance = all_vars['bn/moving_variance:0']
beta = all_vars['bn/beta:0']
gamma = all_vars['bn/gamma:0']
with self.test_session() as sess:
# Test training with placeholder learning phase.
sess.run(variables.global_variables_initializer())
for _ in range(100):
np_output, _, _ = sess.run([outputs2] + updates,
feed_dict={training: True})
# Verify that the statistics are updated during training.
np_moving_mean, np_moving_var = sess.run([moving_mean, moving_variance])
np_inputs = sess.run(inputs2)
np_mean = np.mean(np_inputs, axis=(0, 1, 2))
np_std = np.std(np_inputs, axis=(0, 1, 2))
np_variance = np.square(np_std)
self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
self.assertAllClose(np_variance, np_moving_var, atol=1e-2)
# Verify that the axis is normalized during training.
np_gamma, np_beta = sess.run([gamma, beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs2, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testFunctionalReuseFromScope(self):
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
epsilon = 1e-3
training = array_ops.placeholder(dtype='bool')
with variable_scope.variable_scope('scope'):
_ = normalization_layers.batch_norm(
inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
self.assertEqual(len(variables.global_variables()), 5)
with variable_scope.variable_scope('scope', reuse=True):
_ = normalization_layers.batch_norm(
inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
self.assertEqual(len(variables.global_variables()), 5)
def testNoCenter(self):
bn = normalization_layers.BatchNormalization(axis=1, center=False)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 3)
self.assertEqual(len(bn.trainable_variables), 1)
self.assertEqual(len(bn.non_trainable_variables), 2)
def testNoScale(self):
bn = normalization_layers.BatchNormalization(axis=1, scale=False)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 3)
self.assertEqual(len(bn.trainable_variables), 1)
self.assertEqual(len(bn.non_trainable_variables), 2)
def testRegularizers(self):
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
bn = normalization_layers.BatchNormalization(axis=1, beta_regularizer=reg)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
_ = bn.apply(inputs, training=training)
self.assertEqual(len(bn.losses), 1)
bn = normalization_layers.BatchNormalization(axis=1, gamma_regularizer=reg)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
_ = bn.apply(inputs, training=training)
self.assertEqual(len(bn.losses), 1)
def testRenorm(self):
shape = (4, 3)
xt = array_ops.placeholder(dtypes.float32, shape)
momentum = 0.99
renorm_momentum = 0.8
rmax = 1.1
rmin = 0.9
dmax = 0.1
gamma = 2.
beta = 3.
epsilon = 0.001
bn = normalization_layers.BatchNormalization(
axis=1,
gamma_initializer=init_ops.constant_initializer(gamma),
beta_initializer=init_ops.constant_initializer(beta),
epsilon=epsilon,
momentum=momentum,
renorm=True,
renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax},
renorm_momentum=renorm_momentum)
training = array_ops.placeholder(dtypes.bool)
yt = bn.apply(xt, training=training)
moving_mean = 0.
moving_variance = 1.
renorm_mean = renorm_stddev = 0.
renorm_weight = 0.
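    # The loop below re-implements the renorm correction in NumPy for
    # comparison with the layer: r = sigma_batch / sigma_ref clipped to
    # [rmin, rmax] and d = (mu_batch - mu_ref) / sigma_ref clipped to
    # [-dmax, dmax], where the reference statistics blend the renorm moving
    # averages with the current batch according to the accumulated renorm_weight.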
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
mean = x.mean(0)
stddev = np.sqrt(x.var(0) + epsilon)
adj_mean = renorm_mean + (1. - renorm_weight) * mean
adj_stddev = renorm_stddev + (1. - renorm_weight) * stddev
r = (stddev / adj_stddev).clip(rmin, rmax)
d = ((mean - adj_mean) / adj_stddev).clip(-dmax, dmax)
y_train = ((x - mean) / stddev * r + d) * gamma + beta
renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum)
renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum)
renorm_weight += (1. - renorm_weight) * (1. - renorm_momentum)
moving_mean += (renorm_mean / renorm_weight -
moving_mean) * (1. - momentum)
moving_variance += ((renorm_stddev / renorm_weight) ** 2 - epsilon -
moving_variance) * (1. - momentum)
y_test = ((x - moving_mean) / (moving_variance + epsilon) ** 0.5 *
gamma) + beta
yt_val_train, _, _ = sess.run([yt] + bn.updates,
feed_dict={xt: x, training: True})
yt_val_test, _, _ = sess.run([yt] + bn.updates,
feed_dict={xt: x, training: False})
self.assertAllClose(y_train, yt_val_train, atol=1e-5)
self.assertAllClose(y_test, yt_val_test, atol=1e-5)
if __name__ == '__main__':
test.main()
|
|
import sys
import os
import logging
import pdb
import code
from time import sleep
from java.io import BufferedWriter, OutputStreamWriter, FileOutputStream
import anyjson
import fbconsole
from os import path
from utils import ignore_exception, filter_dict
REPL_DEBUG = True
SOCKET_PATH = "/tmp/spade"
CURDIR = path.split(path.abspath(__file__))[0]
DUMPDIR = path.abspath( path.join(CURDIR, "../", "userdata/") )
CREATE_NODES_FOR_NONFRIENDS = False
if not os.path.exists(DUMPDIR):
os.makedirs(DUMPDIR)
logging.basicConfig(level=logging.DEBUG)
FB_ACTIVITY_TYPES = "status photo comment link".split()
SPADE_FB_PROCESSES = FB_ACTIVITY_TYPES + "likes timeline friendship".split()
class FBDownloader:
"""
Downloads data from Facebook and saves it temporarily
"""
def __init__(self, dump_path):
fbconsole.AUTH_SCOPE = ['user_friends', 'read_stream', 'friends_actions.news', 'friends_activities', 'friends_photos']
fbconsole.authenticate()
self.fb = fbconsole
self.dump_path = dump_path
self.logger = logging.getLogger(self.__class__.__name__)
def download(self):
userdata, userfriends, usernewsfeed = self.get_user_data()
self._save_data("me_data", userdata)
self._save_data("me_friends", userfriends)
self._save_data("me_newsfeed", usernewsfeed)
for friend in userfriends:
friend_id = friend['id']
try:
friend_info, friend_friends, friend_feed = self.get_person_data(friend_id)
self._save_data("%s_info" % friend_id, friend_info)
self._save_data("%s_friends" % friend_id, friend_friends)
self._save_data("%s_feed" % friend_id, friend_feed)
except Exception, e:
self.logger.error("Error while fetching %s (%s)'s data: %s" % (friend['name'], friend['id'], e.message))
def get_user_data(self):
"""
Gets userinfo, friends and home newsfeed
"""
try:
fb = self.fb
userinfo = fb.get("/me/")
res = fb.get("/me/friends/")
friends = res['data']
newsfeed = fb.get("/me/home/")
return (userinfo, friends, newsfeed)
except Exception, e:
pdb.set_trace()
raise e
def get_person_data(self, fuid):
"""
Gets data of a person,
returns userinfo and newsfeed
"""
try:
fb = self.fb
userinfo = fb.get("/%s" % fuid)
friends = [] # fb.get("/%s/friends" % fuid )
feed = fb.get("/%s/feed" % fuid)['data']
return userinfo, friends, feed
except Exception, e:
if REPL_DEBUG:
pdb.set_trace()
raise e
def _save_data(self, filename, jsondata):
"""
Dumps json data in a file
"""
if not filename.endswith(".json"):
filename += ".json"
if not filename.startswith(self.dump_path):
filename = path.join(self.dump_path, filename)
if type(jsondata) not in [str, unicode]:
jsondata = anyjson.serialize(jsondata)
print ("File: %s" % filename)
f = open(filename, "w")
f.write(jsondata)
f.close()
class IDMapper:
""" Creates and maintains numerics SPADE IDs against Facebook's semi-numeric IDs
Tests
>>> mapper = IDMapper()
>>> mapper[None] is None
True
>>> mapper["hello"]
0
>>> mapper["world"]
1
>>> mapper[1337]
2
>>> mapper["world"]
1
>>> mapper[1337]
2
>>> mapper[None] is None
True
"""
def __init__(self):
self._next_id = 0
self._mapping = {}
def __getitem__(self, itemid):
if itemid is None:
return None
if self._mapping.has_key(itemid):
return self._mapping[itemid]
else:
ret = self._mapping[itemid] = self._next_id
self._next_id += 1
return ret
id_mapper = IDMapper()
class DSLSerializable:
""" Used to represent a Node or Edge and serialize it for SPADE DSL Reporter"""
def __init__(self, stype, fb_obj_id=None):
self.attrs = {}
self.stype = stype
self.fb_obj_id = fb_obj_id
def _keyval_serialize(self, k,v):
global id_mapper
if k in ['from', 'to', 'id']:
v = id_mapper[v]
esc = self._escape_data
return (esc(k), esc(unicode(v)) )
def serialize(self):
""" Returns a serialized verion of the data """
global id_mapper
esc = self._escape_data
attrdata = " ".join( "%s:%s" % self._keyval_serialize(k,v) for k,v in self.attrs.iteritems() if k not in ['id'])
if self.fb_obj_id:
return "type:%s id:%s %s\n" % (esc(self.stype), id_mapper[self.fb_obj_id], attrdata)
else:
return "type:%s %s\n" % (esc(self.stype), attrdata)
def add_attr(self, key, val):
remap = {'type': 'fbtype', 'id': 'fbid', 'actions': 'fbactions'}
if remap.has_key(key):
key = remap[key]
self.attrs[ key ] = val
def add_attrs(self, attrs):
for k,v in attrs.iteritems():
self.add_attr(k, v)
def _escape_data(self, string):
"""
Escapes the data for sending to SPADE
"""
try:
if type(string) not in [str, unicode]:
string = unicode(string)
return string.replace(" ", r"\ ").replace('"', r'\"').replace("'", r"\'").replace("\n", r"\n")
except Exception, e:
if REPL_DEBUG:
import code
code.interact(local=locals())
raise e
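# Illustrative sketch only (not used by the original script): roughly what
# serialize() emits for a freshly mapped node. Attribute order follows dict
# iteration and the numeric id depends on how many ids the shared id_mapper
# has already handed out, so the output shown in the comment is approximate.
def _example_dsl_serialization():
    node = DSLSerializable("Agent", "1234.agent")  # hypothetical Facebook id
    node.add_attr("name", "Alice Example")
    # e.g. 'type:Agent id:0 name:Alice\ Example\n'
    return node.serialize()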
class SPADEFeeder:
def __init__(self, dump_path, dsl_pipe):
self.dump_path = dump_path
if not os.path.exists(dsl_pipe):
raise Exception("""
The path to pipe for DSL reporter %s does not exists.
Make sure SPADE is running and DSL reporter has been setup.
For more infromation, take a look at http://code.google.com/p/data-provenance/wiki/Pipe""")
self.pipe = BufferedWriter( OutputStreamWriter(FileOutputStream( dsl_pipe ) ))
self.logger = logging.getLogger(self.__class__.__name__)
self.user_data = self._read_json_data("me_data")
self.user_newsfeed = self._read_json_data("me_newsfeed")
self.friends = dict()
self.created_user_nodes = set()
for f in self._read_json_data("me_friends"):
fuid = f['id']
try:
self.friends[fuid] = self._read_json_data("%s_info" % fuid)
except IOError, e:
logger.info("Skipping data for friend %s; unable to read data" % f['name'])
self.friends[fuid] = f
self.friends[self.user_data['id']] = self.user_data
def create_person_node_if_not_exists(self, user_id, userdata):
"""
Creates a person's Agent node, Status, Likes, Comment, Timeline and Post process nodes
"""
try:
if user_id in self.created_user_nodes:
return False
person_node = DSLSerializable("Agent", user_id + ".agent")
person_node.add_attrs(userdata)
self.write_dsl(person_node)
for process in SPADE_FB_PROCESSES:
node_id = user_id + "." + process
node = DSLSerializable("Process", node_id)
node.add_attr("name", process)
node.add_attr("fbuid", user_id)
node.add_attr("fbname", userdata.get("name", "[None]"))
self.write_dsl(node)
edge = DSLSerializable("WasControlledBy")
edge.add_attr("to", user_id + ".agent")
edge.add_attr("from", node_id)
self.write_dsl(edge)
self.created_user_nodes.add(user_id)
return True
except Exception, e:
logger.exception("Unable to create person node")
if REPL_DEBUG:
import code
code.interact(local=locals())
return False
def write_dsl(self, serializable):
""" Takes a DSLSeralizable object and writes it to SPADE """
data = serializable.serialize()
self.logger.info(data)
self.pipe.write(data)
def feed(self):
"""
Sends the read data to SPADE
"""
# Create nodes for all users
me_fbuid = self.user_data['id']
self.create_person_node_if_not_exists(me_fbuid, self.user_data)
for fuid, userdata in self.friends.iteritems():
try:
self.create_person_node_if_not_exists(fuid, userdata)
# Friendship edges
edge = DSLSerializable("WasTriggeredBy")
edge.add_attr("from", fuid+".friendship")
edge.add_attr("to", me_fbuid+".friendship")
self.write_dsl(edge)
edge = DSLSerializable("WasTriggeredBy")
edge.add_attr("to", fuid+".friendship")
edge.add_attr("from", me_fbuid+".friendship")
self.write_dsl(edge)
except Exception, e:
logger.exception("Error while creating user node")
if REPL_DEBUG:
import code
code.interact(local=locals())
# For each person
for fuid, userdata in self.friends.iteritems():
try:
# Create friendship edges
userfeed = self._read_json_data("%s_feed" % fuid)
logger.info("Now processing feed of user %s" % fuid)
# For each activity
for activity in userfeed:
try:
# Create Node
node = DSLSerializable("Artifact", activity['id'])
node.add_attr("time", activity['created_time'])
node.add_attrs( filter_dict(activity, ['likes', 'shares', 'to', 'from', 'created_time', 'comments']) )
self.write_dsl(node)
if activity.get("from"):
post_from = activity["from"]["id"]
self.create_person_node_if_not_exists(activity["from"]["id"], activity["from"])
else:
post_from = fuid
if activity.get("to"):
post_to = [i["id"] for i in activity["to"]["data"] ]
for i in activity["to"]["data"]:
self.create_person_node_if_not_exists(i["id"], i)
else:
post_to = [fuid]
# TODO: Handle shares separately
if activity.get("type") in FB_ACTIVITY_TYPES:
activity_type = activity.get("type")
else:
default_activity = "status"
activity_type = activity.get("type", default_activity)
logger.warn( "Uknown FB Activity type: %s. Resorting to %s" % ( str(activity.get("type")), default_activity) )
edge = DSLSerializable("WasGeneratedBy")
edge.add_attr("from", post_from + "." + activity_type)
edge.add_attr("to", activity['id'])
self.write_dsl(edge)
for i in post_to:
edge = DSLSerializable("Used")
edge.add_attr("from", i + ".timeline")
edge.add_attr("to", activity['id'])
self.write_dsl(edge)
# Handle post likes
if activity.has_key("likes"):
# TODO: Handle pagination for large number of likes on a post
for like in activity['likes']['data']:
if CREATE_NODES_FOR_NONFRIENDS:
self.create_person_node_if_not_exists(like['id'], like)
if CREATE_NODES_FOR_NONFRIENDS or self.friends.has_key(like['id']):
edge = DSLSerializable("Used")
edge.add_attr("from", like['id'] + ".likes")
edge.add_attr("to", activity['id'])
self.write_dsl(edge)
# Handle post comments
if activity.has_key("comments"):
for comment in activity['comments']['data']:
commenter = comment['from']
if CREATE_NODES_FOR_NONFRIENDS:
self.create_person_node_if_not_exists(commenter['id'], commenter)
if CREATE_NODES_FOR_NONFRIENDS or self.friends.has_key(commenter['id']):
comment_node = DSLSerializable("Artifact", comment['id'])
comment_node.add_attrs( filter_dict(comment, ['from','id', 'to', 'likes']) )
comment_node.add_attr("type", "comment")
self.write_dsl(comment_node)
edge = DSLSerializable("WasGeneratedBy")
edge.add_attr("from", comment['id'])
edge.add_attr("to", commenter['id'] + ".process")
self.write_dsl(edge)
edge = DSLSerializable("WasDerivedFrom")
edge.add_attr("from", comment['id'])
edge.add_attr("to", activity['id'])
self.write_dsl(edge)
# TODO: Handle Facebook shares in one collapsed node?
except Exception, e:
self.logger.error(e.message)
if REPL_DEBUG:
import code; code.interact(local=locals())
except IOError, e:
logger.info("Unavailable feed details of %s. Skipping ... " % fuid)
except Exception, e:
logger.exception("")
if REPL_DEBUG:
import code; code.interact(local=locals())
finally:
try:
self.pipe.flush()
except Exception, e:
pass
def _read_json_data(self, filename):
if not filename.endswith(".json"):
filename += ".json"
if not filename.startswith(self.dump_path):
filename = path.join(self.dump_path, filename)
f = open(filename, "r")
data = f.read()
f.close()
return anyjson.deserialize(data)
if __name__ == '__main__':
logger = logging.getLogger("SPADE-FB")
if "--test" in sys.argv:
import doctest
doctest.testmod()
sys.exit(0)
if "--fetch" in sys.argv:
fbsaver = FBDownloader(DUMPDIR)
fbsaver.download()
if "--nofeed" not in sys.argv:
logger.debug("Setting up Feeder for %s" % SOCKET_PATH)
feeder = SPADEFeeder(DUMPDIR, SOCKET_PATH)
logger.info("Feeder initialized. Now feeding")
feeder.feed()
logger.info("Fed data to SPADE!")
sleep(1)
if "--post-introspect" in sys.argv:
# drop an interactive shell for tinkering around
logger.info("Dropping to shell for introspection after execution")
code.interact(local=locals())
|
|
import hashlib
import functools
import itertools
import re
import uuid
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.cache import cache, caches, _create_cache
from django.utils import encoding, translation
from django.conf import settings
def make_key(key, with_locale=True, normalize=False):
"""Generate the full key for ``k``, with a prefix."""
if with_locale:
key = u'{key}:{lang}'.format(
key=key, lang=translation.get_language())
if normalize:
return hashlib.md5(encoding.smart_bytes(key)).hexdigest()
return encoding.smart_bytes(key)
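# Usage sketch (illustrative key names): the plain form appends the active
# translation language, while the normalized form hashes the locale-qualified
# key into a fixed-length md5 hex digest.
def _example_make_key():
    plain = make_key('addons:1')                   # e.g. 'addons:1:en-US' as bytes
    hashed = make_key('addons:1', normalize=True)  # 32-char md5 hex digest
    return plain, hashed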
def cache_get_or_set(key, default, timeout=DEFAULT_TIMEOUT, version=None):
"""
Fetch a given key from the cache. If the key does not exist,
the key is added and set to the default value. The default value can
also be any callable. If timeout is given, that timeout will be used
for the key; otherwise the default cache timeout will be used.
Return the value of the key stored or retrieved.
Backport from Django 1.11.
"""
val = cache.get(key, version=version)
if val is None:
if callable(default):
default = default()
if default is not None:
cache.add(key, default, timeout=timeout, version=version)
# Fetch the value again to avoid a race condition if another
# caller added a value between the first get() and the add()
# above.
return cache.get(key, default, version=version)
return val
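# Minimal usage sketch, assuming the default Django cache is configured; the key
# name and the callable below are illustrative only.
def _example_cache_get_or_set():
    def expensive_default():
        return 42
    # The first call computes and stores 42; later calls within the timeout
    # return the cached value without invoking the callable again.
    return cache_get_or_set('example:answer', expensive_default, timeout=30)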
def memoize_key(prefix, *args, **kwargs):
"""
For a prefix and arguments returns a key suitable for use in memcache.
Used by memoize.
:param prefix: a prefix for the key in memcache
:type prefix: string
:param args: arguments to be str()'d to form the key
:type args: list
:param kwargs: arguments to be str()'d to form the key
:type kwargs: dict
"""
key = hashlib.md5()
for arg in itertools.chain(args, sorted(kwargs.items())):
key.update(str(arg))
return '%s:memoize:%s:%s' % (settings.CACHE_PREFIX,
prefix, key.hexdigest())
def memoize(prefix, timeout=60):
"""
A simple decorator that caches into memcache, using a simple
key based on stringing args and kwargs.
Arguments to the method must be easily and consistently serializable
using str(..) otherwise the cache key will be inconsistent.
:param prefix: a prefix for the key in memcache
:type prefix: string
:param timeout: number of seconds to cache the key for, default 60 seconds
:type timeout: integer
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
def wrapped_func():
return func(*args, **kwargs)
key = memoize_key(prefix, *args, **kwargs)
return cache_get_or_set(key, wrapped_func, timeout=timeout)
return wrapper
return decorator
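# Usage sketch with a hypothetical function and prefix: results are cached per
# unique (args, kwargs) combination under the 'example' prefix for 120 seconds.
@memoize('example', timeout=120)
def _example_add(a, b):
    return a + b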
class Message(object):
"""
A simple class to store an item in memcache, given a key.
"""
def __init__(self, key):
self.key = '%s:message:%s' % (settings.CACHE_PREFIX, key)
def delete(self):
cache.delete(self.key)
def save(self, message, time=60 * 5):
cache.set(self.key, message, time)
def get(self, delete=False):
res = cache.get(self.key)
if delete:
cache.delete(self.key)
return res
class Token(object):
"""
A simple token stored in the cache.
"""
_well_formed = re.compile('^[a-z0-9-]+$')
def __init__(self, token=None, data=True):
if token is None:
token = str(uuid.uuid4())
self.token = token
self.data = data
def cache_key(self):
assert self.token, 'No token value set.'
return '%s:token:%s' % (settings.CACHE_PREFIX, self.token)
def save(self, time=60):
cache.set(self.cache_key(), self.data, time)
def well_formed(self):
return self._well_formed.match(self.token)
@classmethod
def valid(cls, key, data=True):
"""Checks that the token is valid."""
token = cls(key)
if not token.well_formed():
return False
result = cache.get(token.cache_key())
if result is not None:
return result == data
return False
@classmethod
def pop(cls, key, data=True):
"""Checks that the token is valid and deletes it."""
token = cls(key)
if not token.well_formed():
return False
result = cache.get(token.cache_key())
if result is not None:
if result == data:
cache.delete(token.cache_key())
return True
return False
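# Usage sketch: issue a short-lived token, hand token.token to a client, then
# validate or consume it later. Illustrative only; nothing in this module calls it.
def _example_token_roundtrip():
    token = Token(data=True)
    token.save(time=60)
    assert Token.valid(token.token)      # still stored
    assert Token.pop(token.token)        # validates and deletes it
    assert not Token.valid(token.token)  # gone after pop()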
class CacheStatTracker(BaseCache):
"""A small class used to track cache calls."""
requests_limit = 5000
def __init__(self, location, params):
# Do a .copy() dance to avoid modifying `OPTIONS` in the actual
# settings object.
options = params['OPTIONS'].copy()
actual_backend = options.pop('ACTUAL_BACKEND')
self._real_cache = _create_cache(actual_backend, **options)
self.requests_log = []
self._setup_proxies()
def __repr__(self):
return str("<CacheStatTracker for %s>") % repr(self._real_cache)
def __contains__(self, key):
return self._real_cache.__contains__(key)
def __getattr__(self, name):
return getattr(self._real_cache, name)
def _proxy(self, name):
def _real_proxy(*args, **kwargs):
self.requests_log.append({
'name': name,
'args': args,
'kwargs': kwargs,
})
return getattr(self._real_cache, name)(*args, **kwargs)
return _real_proxy
def _setup_proxies(self):
mappings = (
'add', 'get', 'set', 'delete', 'clear', 'has_key', 'incr', 'decr',
'get_many', 'set_many', 'delete_many')
for name in mappings:
setattr(self, name, self._proxy(name))
def clear_log(self):
self.requests_log = []
@contextmanager
def assert_cache_requests(num, alias='default'):
cache_using = caches[alias]
cache_using.clear_log()
yield
executed = len(cache_using.requests_log)
assert executed == num, "%d requests executed, %d expected" % (
executed, num)
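# Usage sketch, assuming the 'default' cache alias is backed by CacheStatTracker
# (i.e. its OPTIONS carry an ACTUAL_BACKEND); without that, clear_log() and
# requests_log do not exist on the cache object.
def _example_assert_cache_requests():
    with assert_cache_requests(2):
        cache.set('example-key', 1)
        cache.get('example-key')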
|
|
import json
import csv
import sys
import uuid
if len(sys.argv) != 2:
print('Usage: ./makeJson.py <state>')
sys.exit(1)
st = sys.argv[1]
outputFile = "../docs/data/data-%s.json" % st
linksFile = "../data/%s/weights-%s.csv" % (st, st)
senatorsFile = "../data/%s/senator-info-%s.json" % (st, st)
summaryFile = "../data/%s/summary-%s.csv" % (st, st)
maxWeight = 800
minWeight = 100
importance = 15
senatorIDs = {}
pBinFormat = "{0:.2f}"
wBinFormat = "{0:.2f}"
class Senator:
def __init__(self, name, ID, info, party, importance, parities):
self.name = name
self.ID = ID
self.info = info
self.party = party
self.importance = importance
self.parities = parities
def toJson(self):
return {"name": self.name, "id": self.ID, "info": self.info, "party": self.party, "importance": self.importance, "parities": self.parities}
class Link:
def __init__(self, source, target, weight, term):
self.source = source
self.target = target
self.weight = weight
self.term = term
def scaleWeight(self, minWeightUnscaled, maxWeightUnscaled, term):
if self.term == term:
oldRange = maxWeightUnscaled - minWeightUnscaled
newRange = maxWeight - minWeight
self.weight = (((1 - self.weight) * newRange) /
oldRange) + minWeight
def toJson(self):
return {"source": self.source, "target": self.target, "weight": self.weight, "term": self.term}
class Term:
def __init__(self, year, numDem, numRep, numInd, numUnkOth, pbins, wbins):
self.year = year
self.numDem = numDem
self.numRep = numRep
self.numInd = numInd
self.numUnkOth = numUnkOth
self.pbins = pbins
self.wbins = wbins
def toJson(self):
return {"year": self.year, "numDem": self.numDem, "numRep": self.numRep, "numInd": self.numInd, "numUnkOth": self.numUnkOth, "pbins": self.pbins, "wbins": self.wbins}
def getInfoStr(infoItems):
districtStr = "Unknown"
if "district" in infoItems and infoItems["district"] != 0:
districtStr = infoItems["district"]
return "District " + str(districtStr)
def getSenators():
senators = []
with open(senatorsFile, "r") as f:
senatorsData = json.load(f)
for senatorData in senatorsData:
senatorID = senatorData["id"]
if senatorID in senatorIDs:
print("ERROR: Duplicate ID found: " +
senatorID + ". Data must be reconciled.")
sys.exit()
senatorUUID = str(uuid.uuid4())
senatorIDs[senatorID] = senatorUUID
info = getInfoStr(senatorData["info"])
party = "Party Unknown"
if "party" in senatorData["info"]:
party = "D"
if senatorData["info"]["party"] == "Rep":
party = "R"
elif senatorData["info"]["party"] == "Ind":
party = "I"
senators.append(Senator(
senatorData["name"], senatorUUID, info, party, importance, senatorData["parities"]))
return senators
def getLinks():
links = []
with open(linksFile, "r") as f:
reader = csv.DictReader(f)
minWeightsUnscaled = {}
maxWeightsUnscaled = {}
for row in reader:
id1 = senatorIDs[row["Senator1"]]
id2 = senatorIDs[row["Senator2"]]
if id1 == None or id2 == None:
print(
"ERROR: Mismatch between senator info IDs and sentor link IDs. Data must be reconciled.")
sys.exit()
for key in row:
if key != "Senator1" and key != "Senator2":
term = key[6:]
weight = row[key]
if weight != "NA":
weight = float(weight)
if term not in minWeightsUnscaled:
minWeightsUnscaled[term] = weight
else:
minWeightsUnscaled[term] = min(
minWeightsUnscaled[term], weight)
if term not in maxWeightsUnscaled:
maxWeightsUnscaled[term] = weight
else:
maxWeightsUnscaled[term] = max(
maxWeightsUnscaled[term], weight)
links.append(Link(id1, id2, weight, term))
for term in minWeightsUnscaled:
for link in links:
link.scaleWeight(
minWeightsUnscaled[term], maxWeightsUnscaled[term], term)
return links
def getTerms():
terms = []
with open(summaryFile, "r") as f:
reader = csv.DictReader(f)
numPBins = 0
numWBins = 0
for field in reader.fieldnames:
if field.startswith("pbin"):
numPBins += 1
elif field.startswith("wbin"):
numWBins += 1
for row in reader:
pBins = {}
for i in range(1, numPBins + 1):
colName = "pbin" + str(i)
low = (i - 1) * float(1 / numPBins)
high = i * float(1 / numPBins)
newColName = pBinFormat.format(
low) + " - " + pBinFormat.format(high)
pBins[newColName] = int(row[colName])
wBins = {}
for i in range(1, numWBins + 1):
colName = "wbin" + str(i)
low = (i - 1) * float(1 / numWBins)
high = i * float(1 / numWBins)
newColName = wBinFormat.format(
low) + " - " + wBinFormat.format(high)
wBins[newColName] = int(row[colName])
terms.append(Term(row[""], int(row["Dem"]), int(
row["Rep"]), int(row["Ind"]), int(row["Unk"]), pBins, wBins))
return sorted(terms, key=lambda t: t.year, reverse=True)
def writeToJson(senators, links, terms):
data = {}
allNodes = []
allLinks = []
allTerms = []
for senator in senators:
allNodes.append(senator.toJson())
for link in links:
allLinks.append(link.toJson())
for term in terms:
allTerms.append(term.toJson())
data["terms"] = allTerms
data["nodes"] = allNodes
data["links"] = allLinks
with open(outputFile, "w+") as f:
f.write(json.dumps(data, indent=4))
def main():
senators = getSenators()
links = getLinks()
terms = getTerms()
writeToJson(senators, links, terms)
main()
|
|
"""
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
plotting.py
~~~~~~~~~~~
Functionality to plot model data.
"""
import os
import re
import plotly.offline as pltly
import jinja2
from calliope.exceptions import warn
from calliope.analysis.plotting.capacity import plot_capacity
from calliope.analysis.plotting.timeseries import plot_timeseries
from calliope.analysis.plotting.transmission import plot_transmission
from calliope.analysis.plotting.flows import plot_flows
from calliope.analysis.plotting.util import type_of_script
def plot_summary(model, to_file=False, mapbox_access_token=None):
"""
Plot a summary containing timeseries, installed capacities, and
transmission plots. Returns an HTML string by default; returns None if
``to_file`` is given (and saves the HTML string to that file).
Parameters
----------
to_file : str, optional
Path to output file to save HTML to.
mapbox_access_token : str, optional
(passed to plot_transmission) If given and a valid Mapbox API
key, a Mapbox map is drawn for lat-lon coordinates; otherwise
(the default) a simpler built-in map is used.
"""
subset = {'costs': ['monetary']}
timeseries = _plot(*plot_timeseries(model, subset=subset), html_only=True)
capacity = _plot(*plot_capacity(model, subset=subset), html_only=True)
if 'loc_coordinates' in model._model_data:
transmission = _plot(*plot_transmission(
model, html_only=True, mapbox_access_token=mapbox_access_token
), html_only=True)
else:
transmission = '<br><br><p>No location coordinates defined -<br>not plotting transmission.</p>'
template_path = os.path.join(
os.path.dirname(__file__), '..', '..', 'config', 'plots_template.html'
)
with open(template_path, 'r') as f:
html_template = jinja2.Template(f.read())
if 'solution_time' in model._model_data.attrs:
solution_time = model._model_data.attrs['solution_time'] / 60
time_finished = model._model_data.attrs['time_finished']
result_stats = 'taking {:.2f} minutes to solve, completed at {}'.format(
solution_time, time_finished
)
else:
result_stats = 'inputs only'
html = html_template.render(
model_name=model._model_data.attrs['model.name'],
calliope_version=model._model_data.attrs['calliope_version'],
result_stats=result_stats,
top=timeseries,
bottom_left=capacity,
bottom_right=transmission,
)
# Strip plotly-inserted style="..." attributes
html = re.sub(r'style=".+?"', '', html)
if to_file:
with open(to_file, 'w') as f:
f.write(html)
else:
return html
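# Usage sketch (assuming a built and solved Calliope model object named `model`):
#
#     html = plot_summary(model)                   # returns an HTML string
#     plot_summary(model, to_file='summary.html')  # or write it straight to disk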
def _plot(
data, layout, html_only=False, to_file=False,
layout_updates=None, plotly_kwarg_updates=None,
# kwargs are included as they get passed through from the
# plotting accessor method, but are not actually used
**kwargs):
plotly_kwargs = dict(
show_link=False,
config={
'displaylogo': False,
'modeBarButtonsToRemove': ['sendDataToCloud'],
},
)
if type_of_script() == 'jupyter':
plotter = pltly.iplot
plotly_filename_key = 'filename'
pltly.init_notebook_mode(connected=False)
else:
plotter = pltly.plot
plotly_filename_key = 'image_filename'
plotly_kwargs['auto_open'] = True
plotly_kwargs['filename'] = 'temp_plot.html'
if layout_updates:
layout = {**layout, **layout_updates}
if plotly_kwarg_updates:
plotly_kwargs = {**plotly_kwargs, **plotly_kwarg_updates}
if to_file:
filename, image_type = to_file.rsplit('.', 1)
# Plotly can only save certain file types
if image_type not in ['png', 'jpeg', 'svg', 'webp']:
raise TypeError(
'Unable to save plot as `{}`, choose from '
'[`png`, `jpeg`, `svg`, `webp`]'.format(image_type)
)
if 'updatemenus' in layout:
raise ValueError('Unable to save multiple arrays to SVG, pick one array only')
else:
plotly_kwargs.update(image=image_type)
plotly_kwargs[plotly_filename_key] = filename
if data:
if html_only:
return pltly.plot(
{'data': data, 'layout': layout},
include_plotlyjs=False, output_type='div',
**plotly_kwargs
)
else:
plotter({'data': data, 'layout': layout}, **plotly_kwargs)
else:
raise ValueError('No data to plot.')
class ModelPlotMethods:
def __init__(self, model):
self._model = model
_docstring_additions = """
html_only : bool, optional; default = False
Returns a html string for embedding the plot in a webpage
to_file : False or str, optional; default = False
Will save plot to file with the given name and extension.
`to_file='plot.svg'` to save to SVG, `to_file='plot.png'` for
a static PNG image.
Allowed file extensions are: ['png', 'jpeg', 'svg', 'webp'].
layout_updates : dict, optional
The given dict will be merged with the Plotly layout dict
generated by the Calliope plotting function, overwriting keys
that already exist.
plotly_kwarg_updates : dict, optional
The given dict will be merged with the Plotly plot function's
keyword arguments generated by the Calliope plotting function,
overwriting keys that already exist.
"""
def check_optimality(self):
termination = self._model._model_data.attrs.get(
'termination_condition', 'did_not_yet_run')
if termination not in ['optimal', 'did_not_yet_run']:
warn('Model termination condition was not optimal. Plotting may fail!')
def timeseries(self, **kwargs):
self.check_optimality()
data, layout = plot_timeseries(self._model, **kwargs)
return _plot(data, layout, **kwargs)
timeseries.__doc__ = plot_timeseries.__doc__.rstrip() + _docstring_additions
def capacity(self, **kwargs):
self.check_optimality()
data, layout = plot_capacity(self._model, **kwargs)
return _plot(data, layout, **kwargs)
capacity.__doc__ = plot_capacity.__doc__.rstrip() + _docstring_additions
def transmission(self, **kwargs):
self.check_optimality()
data, layout = plot_transmission(self._model, **kwargs)
return _plot(data, layout, **kwargs)
transmission.__doc__ = plot_transmission.__doc__.rstrip() + _docstring_additions
def flows(self, **kwargs):
self.check_optimality()
data, layout = plot_flows(self._model, **kwargs)
return _plot(data, layout, **kwargs)
flows.__doc__ = plot_flows.__doc__.rstrip() + _docstring_additions
def summary(self, **kwargs):
self.check_optimality()
return plot_summary(self._model, **kwargs)
summary.__doc__ = plot_summary.__doc__
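# Illustrative only: with ModelPlotMethods attached to a model instance
# (Calliope typically exposes it as ``model.plot``), the accessors above are
# called like:
#
#     model.plot.timeseries(subset={'costs': ['monetary']})
#     model.plot.capacity(to_file='capacity.png')
#     model.plot.summary(to_file='summary.html')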
|
|
"""piddletest.py
This module puts the various PIDDLE backends through their paces.
"""
from rdkit.sping import pagesizes
from rdkit.sping.pid import *
# The original code imported string.letters, a more generic list. This is no longer supported.
from string import ascii_letters as LETTERS
import math
backends = ['PDF', 'PIL', 'TK', 'PS', 'SVG', 'WX'] # 'piddleAI','piddleQD','piddleGL' ]
backends.sort()
# ----------------------------------------------------------------------
# note, these tests do not flush() the canvas
# ----------------------------------------------------------------------
def minimal(canvasClass):
"""Just a very basic test of line drawing and canvas size."""
canvas = canvasClass(pagesizes.A6, "test-minimal") # A6 is a quarter page
drawMinimal(canvas)
return canvas
def drawMinimal(canvas):
# leave canvas state as you found it, restores state when leaves scope
saver = StateSaver(canvas)
size = canvas.size # (actual size *may* differ from requested size)
canvas.defaultLineColor = green
canvas.drawLine(1, 1, size[0] - 1, size[1] - 1)
canvas.drawLine(1, size[1] - 1, size[0] - 1, 1)
canvas.drawRect(1, 1, size[0] - 1, size[1] - 1, edgeWidth=5)
return canvas
# ----------------------------------------------------------------------
def basics(canvasClass):
"""A general test of most of the drawing primitives except images and strings."""
canvas = canvasClass((300, 300), "test-basics")
return drawBasics(canvas)
def drawBasics(canvas):
# leave canvas state as you found it, restores state when leaves scope
saver = StateSaver(canvas)
canvas.defaultLineColor = Color(0.7, 0.7, 1.0) # light blue
canvas.drawLines(map(lambda i: (i * 10, 0, i * 10, 300), range(30)))
canvas.drawLines(map(lambda i: (0, i * 10, 300, i * 10), range(30)))
canvas.defaultLineColor = black
canvas.drawLine(10, 200, 20, 190, color=red)
canvas.drawEllipse(130, 30, 200, 100, fillColor=yellow, edgeWidth=4)
canvas.drawArc(130, 30, 200, 100, 45, 50, fillColor=blue, edgeColor=navy, edgeWidth=4)
canvas.defaultLineWidth = 4
canvas.drawRoundRect(30, 30, 100, 100, fillColor=blue, edgeColor=maroon)
canvas.drawCurve(20, 20, 100, 50, 50, 100, 160, 160)
# canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1),
# color=green, angle=-45)
polypoints = [(160, 120), (130, 190), (210, 145), (110, 145), (190, 190)]
canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1)
canvas.drawRect(200, 200, 260, 260, edgeColor=yellow, edgeWidth=5)
canvas.drawLine(200, 260, 260, 260, color=green, width=5)
canvas.drawLine(260, 200, 260, 260, color=red, width=5)
return canvas
# ----------------------------------------------------------------------
def advanced(canvasClass):
"""A test of figures and images."""
canvas = canvasClass((300, 300), "test-advanced")
return drawAdvanced(canvas)
def drawAdvanced(canvas):
# leave canvas state as you found it, restores state when leaves scope
saver = StateSaver(canvas)
figure = [
(figureCurve, 20, 20, 100, 50, 50, 100, 160, 160), (figureLine, 200, 200, 250, 150),
(figureArc, 50, 10, 250, 150, 10, 90)
]
canvas.drawFigure(figure, fillColor=yellow, edgeWidth=4)
try:
from PIL import Image
except ImportError:
canvas.drawString("PIL not available!", 20, 200)
Image = None
if Image:
img = Image.open("python.gif")
canvas.drawImage(img, 120, 50, 120 + 32, 50 + 64)
canvas.drawImage(img, 0, 210, 300, 210 + 32)
return canvas
# ----------------------------------------------------------------------
def bluefunc(x):
return 1.0 / (1.0 + math.exp(-10 * (x - 0.6)))
def redfunc(x):
return 1.0 / (1.0 + math.exp(10 * (x - 0.5)))
def greenfunc(x):
return 1 - pow(redfunc(x + 0.2), 2) - bluefunc(x - 0.3)
def spectrum(canvasClass):
canvas = canvasClass((300, 300), "test-spectrum")
return drawSpectrum(canvas)
def drawSpectrum(canvas):
"""Generates a spectrum plot; illustrates colors and useful application."""
saver = StateSaver(
canvas) # leave canvas state as you found it, restores state when leaves scope
def plot(f, canvas, offset=0):
for i in range(0, 100):
x = float(i) / 100
canvas.drawLine(i * 3 + offset, 250, i * 3 + offset, 250 - 100 * f(x))
def genColors(n=100):
out = [None] * n
for i in range(n):
x = float(i) / n
out[i] = Color(redfunc(x), greenfunc(x), bluefunc(x))
return out
colors = genColors(300)
# draw a black background for the spectrum
canvas.drawRect(0, 0, 300, 100, edgeColor=black, fillColor=black)
# draw the spectrum
for i in range(len(colors)):
canvas.drawLine(i, 20, i, 80, colors[i])
# plot the components of the spectrum
canvas.defaultLineColor = red
plot(redfunc, canvas)
canvas.defaultLineColor = blue
plot(bluefunc, canvas, 1)
canvas.defaultLineColor = green
plot(greenfunc, canvas, 2)
return canvas
# ----------------------------------------------------------------------
def strings(canvasClass):
canvas = canvasClass(size=(400, 400), name="test-strings")
return drawStrings(canvas)
def drawStrings(canvas):
"""Checks font metrics, and also illustrates the standard fonts."""
# leave canvas state as you found it, restores state when leaves scope
saver = StateSaver(canvas)
def Write(canvas, s, font, curs):
if font:
canvas.defaultFont = font
text = s
while text and text[-1] == '\n':
text = text[:-1]
canvas.drawString(text, x=curs[0], y=curs[1])
if s[-1] == '\n':
curs[0] = 10
curs[1] = curs[1] + canvas.fontHeight() + canvas.fontDescent()
else:
curs[0] = curs[0] + canvas.stringWidth(s)
def StandardFonts(canvas, Write):
canvas.defaultLineColor = black
curs = [10, 70]
for size in (12, 18):
for fontname in ("times", "courier", "helvetica", "symbol", "monospaced", "serif",
"sansserif"):
curs[0] = 10
curs[1] = curs[1] + size * 1.5
Write(canvas, "%s %d " % (fontname, size), Font(face=fontname, size=size), curs)
Write(canvas, "bold ", Font(face=fontname, size=size, bold=1), curs)
Write(canvas, "italic ", Font(face=fontname, size=size, italic=1), curs)
Write(canvas, "underline", Font(face=fontname, size=size, underline=1), curs)
CenterAndBox(canvas, "spam, spam, spam, baked beans, and spam!")
StandardFonts(canvas, Write)
return canvas
def CenterAndBox(canvas, s, cx=200, y=40):
"tests string positioning, stringWidth, fontAscent, and fontDescent"
canvas.drawLine(cx, y - 30, cx, y + 30, color=yellow)
w = canvas.stringWidth(s)
canvas.drawLine(cx - w / 2, y, cx + w / 2, y, color=red)
canvas.drawString(s, cx - w / 2, y)
canvas.defaultLineColor = Color(0.7, 0.7, 1.0) # light blue
canvas.drawLine(cx - w / 2, y - 20, cx - w / 2, y + 20) # left
canvas.drawLine(cx + w / 2, y - 20, cx + w / 2, y + 20) # right
asc, desc = canvas.fontAscent(), canvas.fontDescent()
canvas.drawLine(cx - w / 2 - 20, y - asc, cx + w / 2 + 20, y - asc) # top
canvas.drawLine(cx - w / 2 - 20, y + desc, cx + w / 2 + 20, y + desc) # bottom
# ----------------------------------------------------------------------
def rotstring(canvasClass):
canvas = canvasClass((450, 300), name='test-rotstring')
return drawRotstring(canvas)
def drawRotstring(canvas):
"""Draws rotated strings."""
saver = StateSaver(
canvas) # leave canvas state as you found it, restores state when leaves scope
canvas.defaultFont = Font(bold=1)
canvas.defaultLineColor = (blue + white) / 2
canvas.drawLine(0, 150, 300, 150)
canvas.drawLine(150, 0, 150, 300)
s = " __albatros at "
w = canvas.stringWidth(s)
canvas.drawEllipse(150 - w, 150 - w, 150 + w, 150 + w, fillColor=transparent)
colors = [red, orange, yellow, green, blue, purple]
cnum = 0
for ang in range(0, 359, 30):
canvas.defaultLineColor = colors[cnum]
s2 = s + str(ang)
canvas.drawString(s2, 150, 150, angle=ang)
cnum = (cnum + 1) % len(colors)
canvas.drawString("This is a\nrotated\nmulti-line string!!!", 350, 100, angle=-90,
font=Font(underline=1))
#canvas.drawString( "This is a\nrotated\nmulti-line string!!!", 400, 175, angle= -45, font=Font(underline=1) )
return canvas
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def tkTest(testfunc):
# TK tests are called from here because need TK's event loop
try:
import sping.TK
import Tkinter
except ImportError:
print("A module needed for sping.TK is not available, select another backend")
return
root = Tkinter.Tk()
frame = Tkinter.Frame(root) # label='piddletestTK'
# try new Tk canvas
tkcanvas = sping.TK.TKCanvas(size=(400, 400), name='sping-testTK', master=frame)
bframe = Tkinter.Frame(root)
minimalB = Tkinter.Button(
bframe, text='minimal test',
command=lambda c=tkcanvas: (c.clear(), drawMinimal(c), c.flush())).pack(side=Tkinter.LEFT)
basicB = Tkinter.Button(
bframe, text='basic test',
command=lambda c=tkcanvas: (c.clear(), drawBasics(c), c.flush())).pack(side=Tkinter.LEFT)
spectB = Tkinter.Button(
bframe, text='spectrum test',
command=lambda c=tkcanvas: (c.clear(), drawSpectrum(c), c.flush())).pack(side=Tkinter.LEFT)
stringsB = Tkinter.Button(
bframe, text='strings test',
command=lambda c=tkcanvas: (c.clear(), drawStrings(c), c.flush())).pack(side=Tkinter.LEFT)
rotstrB = Tkinter.Button(
bframe, text='rotated strings test',
command=lambda c=tkcanvas: (c.clear(), drawRotstring(c), c.flush())).pack(side=Tkinter.LEFT)
advancedB = Tkinter.Button(
bframe, text='advanced test',
command=lambda c=tkcanvas: (c.clear(), drawAdvanced(c), c.flush())).pack(side=Tkinter.LEFT)
bframe.pack(side=Tkinter.TOP)
tkcanvas.pack()
frame.pack()
# try to draw before running mainloop
if testfunc == minimal:
drawMinimal(tkcanvas)
elif testfunc == basics:
drawBasics(tkcanvas)
elif testfunc == advanced:
drawAdvanced(tkcanvas)
elif testfunc == spectrum:
drawSpectrum(tkcanvas)
elif testfunc == strings:
drawStrings(tkcanvas)
elif testfunc == rotstring:
drawRotstring(tkcanvas)
else:
print("Illegal testfunc handed to tkTest")
raise ValueError("Unsupported testfunc")
tkcanvas.flush()
root.mainloop()
# ----------------------------------------------------------------------
def wxTest(testfunc):
try:
import sping.WX
from wxPython.wx import wxApp
except ImportError:
print("A module needed for sping.WX is not available, select another backend")
return
global wx_app
if 'wx_app' not in globals():
class CanvasApp(wxApp):
"The wxApp that runs canvas. Initializes windows, and handles redrawing"
def OnInit(self):
return 1
wx_app = CanvasApp(0)
# run the test, passing the canvas class and returning the canvas
canvas = testfunc(sping.WX.WXCanvas)
canvas.flush()
# Run the main loop
wx_app.MainLoop()
def runtest(backend, testfunc):
# special cases:
if backend == 'TK':
tkTest(testfunc) # takes care of import, etc.
return
if backend == 'WX':
wxTest(testfunc) # takes care of import, etc.
return
# import the relevant module
modname = 'sping.' + backend
print("importing ", modname)
module = __import__('sping.' + backend, globals(), locals(), [backend])
# figure out the canvas class name (e.g., "PILCanvas") and get that
#canvasClass = getattr(module, backend[6:]+"Canvas")
#canvasClass = getattr(module, backend+"Canvas")
# from spam.ham import eggs
# __import__('spam.ham', globals(), locals(), ['eggs'])
canvasClass = getattr(module, backend + "Canvas")
# run the test, passing the canvas class and returning the canvas
canvas = testfunc(canvasClass)
# do post-test cleanup
canvas.flush()
# handle save's here
if backend == 'PIL':
# I'm saving twice because sometimes jpeg doesn't work when png does
canvas.save(file=canvas.name + ".jpg") # save as a jpeg file
canvas.save(format='png') # save as a png file
elif backend == 'piddleVCR':
filename = canvas.name + ".vcr"
canvas.save(filename)
print(filename, "saved")
else: # if backend == 'PS' or backend== 'PDF':
canvas.save() # should be "pass'ed" by Canvas's that don't use save
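# Non-interactive usage sketch: run one test against one backend without the menu
# loop, e.g. rendering the basics test via the sping.PDF canvas and saving it.
def _example_run_pdf_basics():
    runtest('PDF', basics)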
def mainLoop():
global tests, backends
backend = None
test = None
while 1:
# print backends on left, tests on right, indicate chosen one of each
i = 0
while i < len(backends) or i < len(tests):
try:
bstr = str(i + 1) + '. ' + backends[i]
except Exception:
bstr = ''
try:
tstr = chr(65 + i) + '. ' + tests[i].__name__
except Exception:
tstr = ''
if i == backend:
bflag = '==>'
else:
bflag = ''
if i == test:
tflag = '==>'
else:
tflag = ''
print("%10s %-20s %10s %-20s" % (bflag, bstr, tflag, tstr))
i = i + 1
print()
inp = input("Selection (0 to exit): ")
print()
if inp == '0':
return
if inp:
testinp = ''
if inp[-1] in LETTERS:
testinp = inp[-1]
elif inp[0] in LETTERS:
testinp = inp[0]
backinp = ' '.join(filter(lambda x: x in '0123456789', inp))
if backinp:
backend = int(backinp) - 1
if backend < len(backends):
docstr = __import__('sping.' + backends[backend], globals(), locals(),
backends[backend]).__doc__
#docstr = __import__('sping.'+backends[backend]).__doc__
if docstr:
print(docstr)
else:
print("<no doc string>")
else:
backend = None
if testinp:
test = ord(testinp[0].upper()) - ord('A')
if test >= 0 and test < len(tests):
docstr = tests[test].__doc__
if docstr:
print(docstr)
else:
test = None
print()
# now, if we have a valid backend and test, run it
if backend is not None and test is not None:
runtest(backends[backend], tests[test])
tests = (minimal, basics, advanced, spectrum, strings, rotstring)
if __name__ == '__main__':
mainLoop()
|
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
with tf.Graph().as_default():
test_text = '''
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills'''
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict),\
'vocab_to_int is not a dictionary.'
assert isinstance(int_to_vocab, dict),\
'int_to_vocab is not a dictionary.'
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab),\
'Length of vocab_to_int and int_to_vocab don\'t match. ' \
'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)
# Make sure the dicts make the same lookup
mismatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]
assert not mismatches,\
'Found {} mismatch(es). First mismatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
len(mismatches),
*mismatches[0])
assert len(vocab_to_int) > len(set(test_text))/2,\
'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))
_print_success_message()
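# A minimal reference sketch of a create_lookup_tables implementation that the
# checks above would accept (the project expects learners to supply their own):
def _example_create_lookup_tables(text):
    vocab = set(text)
    vocab_to_int = {word: i for i, word in enumerate(vocab)}
    int_to_vocab = {i: word for word, i in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab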
def test_get_batches(get_batches):
with tf.Graph().as_default():
test_batch_size = 128
test_seq_length = 5
test_int_text = list(range(1000*test_seq_length))
batches = get_batches(test_int_text, test_batch_size, test_seq_length)
# Check type
assert isinstance(batches, np.ndarray),\
'Batches is not a Numpy array'
# Check shape
assert batches.shape == (7, 2, 128, 5),\
'Batches returned wrong shape. Found {}'.format(batches.shape)
for x in range(batches.shape[2]):
assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\
'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])
assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\
'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])
last_seq_target = (test_batch_size-1) * 35 + 31
last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))
last_seq[-1] = batches[0,0,0,0]
assert np.array_equal(batches[-1,1,-1], last_seq),\
'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)
_print_success_message()
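# A reference sketch of get_batches that satisfies the shape and wrap-around
# checks above: split the text into batch_size contiguous rows, shift it by one
# for the targets, and slice the rows into n_batches sequences of seq_length.
def _example_get_batches(int_text, batch_size, seq_length):
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    xdata = np.array(int_text[:n_batches * words_per_batch])
    ydata = np.roll(xdata, -1)  # targets are inputs shifted by one, wrapping around
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))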
def test_tokenize(token_lookup):
with tf.Graph().as_default():
symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n'])
token_dict = token_lookup()
# Check type
assert isinstance(token_dict, dict), \
'Returned type is {}.'.format(type(token_dict))
# Check symbols
missing_symbols = symbols - set(token_dict.keys())
unknown_symbols = set(token_dict.keys()) - symbols
assert not missing_symbols, \
'Missing symbols: {}'.format(missing_symbols)
assert not unknown_symbols, \
'Unknown symbols: {}'.format(unknown_symbols)
# Check values type
bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]
assert not bad_value_type,\
'Found token as {} type.'.format(bad_value_type[0])
# Check for spaces
key_has_spaces = [k for k in token_dict.keys() if ' ' in k]
val_has_spaces = [val for val in token_dict.values() if ' ' in val]
assert not key_has_spaces,\
'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])
assert not val_has_spaces,\
'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])
# Check for symbols in values
symbol_val = ()
for symbol in symbols:
for val in token_dict.values():
if symbol in val:
symbol_val = (symbol, val)
assert not symbol_val,\
'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)
_print_success_message()
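# A token_lookup sketch that passes the checks above: each punctuation symbol maps
# to a space-free placeholder token that contains none of the replaced symbols.
def _example_token_lookup():
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }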
def test_get_inputs(get_inputs):
with tf.Graph().as_default():
input_data, targets, lr = get_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
# Check rank
input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())
targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())
lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())
assert input_rank == 2,\
'Input has wrong rank. Rank {} found.'.format(input_rank)
assert targets_rank == 2,\
'Targets has wrong rank. Rank {} found.'.format(targets_rank)
assert lr_rank == 0,\
'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)
_print_success_message()
def test_get_init_cell(get_init_cell):
with tf.Graph().as_default():
test_batch_size_ph = tf.placeholder(tf.int32)
test_rnn_size = 256
cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)
# Check type
assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
'Cell is wrong type. Found {} type'.format(type(cell))
# Check for name attribute
assert hasattr(init_state, 'name'),\
'Initial state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
# Check name
assert init_state.name == 'initial_state:0',\
'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)
_print_success_message()
def test_get_embed(get_embed):
with tf.Graph().as_default():
embed_shape = [50, 5, 256]
test_input_data = tf.placeholder(tf.int32, embed_shape[:2])
test_vocab_size = 27
test_embed_dim = embed_shape[2]
embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)
# Check shape
assert embed.shape == embed_shape,\
'Wrong shape. Found shape {}'.format(embed.shape)
_print_success_message()
def test_build_rnn(build_rnn):
with tf.Graph().as_default():
test_rnn_size = 256
test_rnn_layer_size = 2
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
outputs, final_state = build_rnn(test_cell, test_inputs)
# Check name
assert hasattr(final_state, 'name'),\
'Final state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
assert final_state.name == 'final_state:0',\
'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)
# Check shape
assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_build_nn(build_nn):
with tf.Graph().as_default():
test_input_data_shape = [128, 5]
test_input_data = tf.placeholder(tf.int32, test_input_data_shape)
test_rnn_size = 256
test_rnn_layer_size = 2
test_vocab_size = 27
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size)
# Check name
assert hasattr(final_state, 'name'), \
'Final state doesn\'t have the "name" attribute. Are you using build_rnn?'
assert final_state.name == 'final_state:0', \
'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name)
# Check Shape
assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \
'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_get_tensors(get_tensors):
test_graph = tf.Graph()
with test_graph.as_default():
test_input = tf.placeholder(tf.int32, name='input')
test_initial_state = tf.placeholder(tf.int32, name='initial_state')
test_final_state = tf.placeholder(tf.int32, name='final_state')
test_probs = tf.placeholder(tf.float32, name='probs')
input_text, initial_state, final_state, probs = get_tensors(test_graph)
# Check correct tensor
assert input_text == test_input,\
'Test input is wrong tensor'
assert initial_state == test_initial_state, \
'Initial state is wrong tensor'
assert final_state == test_final_state, \
'Final state is wrong tensor'
assert probs == test_probs, \
'Probabilities is wrong tensor'
_print_success_message()
def test_pick_word(pick_word):
with tf.Graph().as_default():
test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])
test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}
pred_word = pick_word(test_probabilities, test_int_to_vocab)
# Check type
assert isinstance(pred_word, str),\
'Predicted word is wrong type. Found {} type.'.format(type(pred_word))
# Check word is from vocab
assert pred_word in test_int_to_vocab.values(),\
'Predicted word not found in int_to_vocab.'
_print_success_message()
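# A pick_word sketch that satisfies the checks above: sample a word id from the
# probability distribution and look the word up in int_to_vocab.
def _example_pick_word(probabilities, int_to_vocab):
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[int(word_id)]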
|
|
"""
Compiler-side implementation of the Numba typed-list.
"""
import operator
from enum import IntEnum
from llvmlite import ir
from numba.core.extending import (
overload,
overload_method,
overload_attribute,
register_jitable,
intrinsic,
register_model,
models,
lower_builtin,
)
from numba.core.imputils import iternext_impl
from numba.core import types, cgutils
from numba.core.types import (
ListType,
ListTypeIterableType,
ListTypeIteratorType,
Type,
NoneType,
)
from numba.core.imputils import impl_ret_borrowed, RefType
from numba.core.errors import TypingError
from numba.core import typing
from numba.typed.typedobjectutils import (_as_bytes, _cast, _nonoptional,
_get_incref_decref,
_container_get_data,
_container_get_meminfo,)
from numba.cpython import listobj
ll_list_type = cgutils.voidptr_t
ll_listiter_type = cgutils.voidptr_t
ll_voidptr_type = cgutils.voidptr_t
ll_status = cgutils.int32_t
ll_ssize_t = cgutils.intp_t
ll_bytes = cgutils.voidptr_t
_meminfo_listptr = types.MemInfoPointer(types.voidptr)
INDEXTY = types.intp
index_types = types.integer_domain
DEFAULT_ALLOCATED = 0
@register_model(ListType)
class ListModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('meminfo', _meminfo_listptr),
('data', types.voidptr), # ptr to the C list
]
super(ListModel, self).__init__(dmm, fe_type, members)
@register_model(ListTypeIterableType)
@register_model(ListTypeIteratorType)
class ListIterModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('size', types.intp), # the size of the iteration space
('parent', fe_type.parent), # the parent list
('index', types.EphemeralPointer(types.intp)), # current index
]
super(ListIterModel, self).__init__(dmm, fe_type, members)
class ListStatus(IntEnum):
"""Status code for other list operations.
"""
LIST_OK = 0,
LIST_ERR_INDEX = -1
LIST_ERR_NO_MEMORY = -2
LIST_ERR_MUTATED = -3
LIST_ERR_ITER_EXHAUSTED = -4
LIST_ERR_IMMUTABLE = -5
class ErrorHandler(object):
"""ErrorHandler for calling codegen functions from this file.
Stores the state needed to raise an exception from nopython mode.
"""
def __init__(self, context):
self.context = context
def __call__(self, builder, status, msg):
ok_status = status.type(int(ListStatus.LIST_OK))
with builder.if_then(builder.icmp_signed('!=', status, ok_status),
likely=True):
self.context.call_conv.return_user_exc(
builder, RuntimeError, (msg,))
def _check_for_none_typed(lst, method):
if isinstance(lst.dtype, NoneType):
raise TypingError("method support for List[None] is limited, "
"not supported: '{}'.".format(method))
@intrinsic
def _as_meminfo(typingctx, lstobj):
"""Returns the MemInfoPointer of a list.
"""
if not isinstance(lstobj, types.ListType):
raise TypingError('expected *lstobj* to be a ListType')
def codegen(context, builder, sig, args):
[tl] = sig.args
[l] = args
# Incref
context.nrt.incref(builder, tl, l)
ctor = cgutils.create_struct_proxy(tl)
lstruct = ctor(context, builder, value=l)
# Returns the plain MemInfo
return lstruct.meminfo
sig = _meminfo_listptr(lstobj)
return sig, codegen
@intrinsic
def _from_meminfo(typingctx, mi, listtyperef):
"""Recreate a list from a MemInfoPointer
"""
if mi != _meminfo_listptr:
raise TypingError('expected a MemInfoPointer for list.')
listtype = listtyperef.instance_type
if not isinstance(listtype, ListType):
raise TypingError('expected a {}'.format(ListType))
def codegen(context, builder, sig, args):
[tmi, tdref] = sig.args
td = tdref.instance_type
[mi, _] = args
ctor = cgutils.create_struct_proxy(td)
dstruct = ctor(context, builder)
data_pointer = context.nrt.meminfo_data(builder, mi)
data_pointer = builder.bitcast(data_pointer, ll_list_type.as_pointer())
dstruct.data = builder.load(data_pointer)
dstruct.meminfo = mi
return impl_ret_borrowed(
context,
builder,
listtype,
dstruct._getvalue(),
)
sig = listtype(mi, listtyperef)
return sig, codegen
def _list_codegen_set_method_table(context, builder, lp, itemty):
vtablety = ir.LiteralStructType([
ll_voidptr_type, # item incref
ll_voidptr_type, # item decref
])
setmethod_fnty = ir.FunctionType(
ir.VoidType(),
[ll_list_type, vtablety.as_pointer()]
)
setmethod_fn = cgutils.get_or_insert_function(
builder.module,
setmethod_fnty,
'numba_list_set_method_table')
vtable = cgutils.alloca_once(builder, vtablety, zfill=True)
# install item incref/decref
item_incref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 0)
item_decref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 1)
dm_item = context.data_model_manager[itemty]
if dm_item.contains_nrt_meminfo():
item_incref, item_decref = _get_incref_decref(
context, builder.module, dm_item, "list"
)
builder.store(
builder.bitcast(item_incref, item_incref_ptr.type.pointee),
item_incref_ptr,
)
builder.store(
builder.bitcast(item_decref, item_decref_ptr.type.pointee),
item_decref_ptr,
)
builder.call(setmethod_fn, [lp, vtable])
@intrinsic
def _list_set_method_table(typingctx, lp, itemty):
"""Wrap numba_list_set_method_table
"""
resty = types.void
sig = resty(lp, itemty)
def codegen(context, builder, sig, args):
_list_codegen_set_method_table(
context, builder, args[0], itemty.instance_type)
return sig, codegen
@lower_builtin(operator.is_, types.ListType, types.ListType)
def list_is(context, builder, sig, args):
a_meminfo = _container_get_meminfo(context, builder, sig.args[0], args[0])
b_meminfo = _container_get_meminfo(context, builder, sig.args[1], args[1])
ma = builder.ptrtoint(a_meminfo, cgutils.intp_t)
mb = builder.ptrtoint(b_meminfo, cgutils.intp_t)
return builder.icmp_signed('==', ma, mb)
def _call_list_free(context, builder, ptr):
"""Call numba_list_free(ptr)
"""
fnty = ir.FunctionType(
ir.VoidType(),
[ll_list_type],
)
free = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_free')
builder.call(free, [ptr])
# FIXME: this needs a careful review
def _imp_dtor(context, module):
"""Define the dtor for list
"""
llvoidptr = context.get_value_type(types.voidptr)
llsize = context.get_value_type(types.uintp)
fnty = ir.FunctionType(
ir.VoidType(),
[llvoidptr, llsize, llvoidptr],
)
fname = '_numba_list_dtor'
fn = cgutils.get_or_insert_function(module, fnty, fname)
if fn.is_declaration:
# Set linkage
fn.linkage = 'linkonce_odr'
# Define
builder = ir.IRBuilder(fn.append_basic_block())
lp = builder.bitcast(fn.args[0], ll_list_type.as_pointer())
l = builder.load(lp)
_call_list_free(context, builder, l)
builder.ret_void()
return fn
def new_list(item, allocated=DEFAULT_ALLOCATED):
"""Construct a new list. (Not implemented in the interpreter yet)
Parameters
----------
item: TypeRef
Item type of the new list.
allocated: int
number of items to pre-allocate
"""
# With JIT disabled, ignore all arguments and return a Python list.
return list()
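# Hedged usage sketch: assuming this module is numba.typed.listobject from
# upstream Numba (where new_list is the jit-side constructor registered via
# impl_new_list below), a compiled caller might look like the following.
# The function name `make` is purely illustrative.
#
#     from numba import njit, types
#     from numba.typed.listobject import new_list
#
#     @njit
#     def make():
#         l = new_list(types.int64, allocated=16)  # pre-allocate 16 item slots
#         l.append(1)
#         return len(l)   # -> 1; l._allocated() reports the pre-allocation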
def _add_meminfo(context, builder, lstruct):
alloc_size = context.get_abi_sizeof(
context.get_value_type(types.voidptr),
)
dtor = _imp_dtor(context, builder.module)
meminfo = context.nrt.meminfo_alloc_dtor(
builder,
context.get_constant(types.uintp, alloc_size),
dtor,
)
data_pointer = context.nrt.meminfo_data(builder, meminfo)
data_pointer = builder.bitcast(data_pointer, ll_list_type.as_pointer())
builder.store(lstruct.data, data_pointer)
lstruct.meminfo = meminfo
@intrinsic
def _make_list(typingctx, itemty, ptr):
"""Make a list struct with the given *ptr*
Parameters
----------
itemty: Type
Type of the item.
ptr : llvm pointer value
Points to the list object.
"""
list_ty = types.ListType(itemty.instance_type)
def codegen(context, builder, signature, args):
ptr = args[1]
ctor = cgutils.create_struct_proxy(list_ty)
lstruct = ctor(context, builder)
lstruct.data = ptr
_add_meminfo(context, builder, lstruct)
return lstruct._getvalue()
sig = list_ty(itemty, ptr)
return sig, codegen
def _list_new_codegen(context, builder, itemty, new_size, error_handler):
fnty = ir.FunctionType(
ll_status,
[ll_list_type.as_pointer(), ll_ssize_t, ll_ssize_t],
)
fn = cgutils.get_or_insert_function(builder.module, fnty, 'numba_list_new')
# Determine sizeof item types
ll_item = context.get_data_type(itemty)
sz_item = context.get_abi_sizeof(ll_item)
reflp = cgutils.alloca_once(builder, ll_list_type, zfill=True)
status = builder.call(
fn,
[reflp, ll_ssize_t(sz_item), new_size],
)
msg = "Failed to allocate list"
error_handler(
builder,
status,
msg,
)
lp = builder.load(reflp)
return lp
@intrinsic
def _list_new(typingctx, itemty, allocated):
"""Wrap numba_list_new.
Allocate a new list object with zero capacity.
Parameters
----------
itemty: Type
Type of the items
allocated: int
number of items to pre-allocate
"""
resty = types.voidptr
sig = resty(itemty, allocated)
def codegen(context, builder, sig, args):
error_handler = ErrorHandler(context)
return _list_new_codegen(context,
builder,
itemty.instance_type,
args[1],
error_handler,
)
return sig, codegen
@overload(new_list)
def impl_new_list(item, allocated=DEFAULT_ALLOCATED):
"""Creates a new list.
Parameters
----------
item: Numba type
type of the list item.
allocated: int
number of items to pre-allocate
"""
if not isinstance(item, Type):
raise TypeError("expecting *item* to be a numba Type")
itemty = item
def imp(item, allocated=DEFAULT_ALLOCATED):
if allocated < 0:
raise RuntimeError("expecting *allocated* to be >= 0")
lp = _list_new(itemty, allocated)
_list_set_method_table(lp, itemty)
l = _make_list(itemty, lp)
return l
return imp
@overload(len)
def impl_len(l):
"""len(list)
"""
if isinstance(l, types.ListType):
def impl(l):
return _list_length(l)
return impl
@intrinsic
def _list_length(typingctx, l):
"""Wrap numba_list_length
Returns the length of the list.
"""
sig = types.intp(l)
def codegen(context, builder, sig, args):
[tl] = sig.args
[l] = args
fnty = ir.FunctionType(
ll_ssize_t,
[ll_list_type],
)
fname = 'numba_list_size_address'
fn = cgutils.get_or_insert_function(builder.module, fnty, fname)
fn.attributes.add('alwaysinline')
fn.attributes.add('readonly')
fn.attributes.add('nounwind')
lp = _container_get_data(context, builder, tl, l)
len_addr = builder.call(fn, [lp,],)
ptr = builder.inttoptr(len_addr, cgutils.intp_t.as_pointer())
return builder.load(ptr)
return sig, codegen
@overload_method(types.ListType, "_allocated")
def impl_allocated(l):
"""list._allocated()
"""
if isinstance(l, types.ListType):
def impl(l):
return _list_allocated(l)
return impl
@intrinsic
def _list_allocated(typingctx, l):
"""Wrap numba_list_allocated
Returns the allocation of the list.
"""
resty = types.intp
sig = resty(l)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ll_ssize_t,
[ll_list_type],
)
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_allocated')
[l] = args
[tl] = sig.args
lp = _container_get_data(context, builder, tl, l)
n = builder.call(fn, [lp])
return n
return sig, codegen
@overload_method(types.ListType, "_is_mutable")
def impl_is_mutable(l):
"""list._is_mutable()"""
if isinstance(l, types.ListType):
def impl(l):
return bool(_list_is_mutable(l))
return impl
@intrinsic
def _list_is_mutable(typingctx, l):
"""Wrap numba_list_is_mutable
Returns the state of the is_mutable member
"""
resty = types.int32
sig = resty(l)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ll_status,
[ll_list_type],
)
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_is_mutable')
[l] = args
[tl] = sig.args
lp = _container_get_data(context, builder, tl, l)
n = builder.call(fn, [lp])
return n
return sig, codegen
@overload_method(types.ListType, "_make_mutable")
def impl_make_mutable(l):
"""list._make_mutable()"""
if isinstance(l, types.ListType):
def impl(l):
_list_set_is_mutable(l, 1)
return impl
@overload_method(types.ListType, "_make_immutable")
def impl_make_immutable(l):
"""list._make_immutable()"""
if isinstance(l, types.ListType):
def impl(l):
_list_set_is_mutable(l, 0)
return impl
@intrinsic
def _list_set_is_mutable(typingctx, l, is_mutable):
"""Wrap numba_list_set_mutable
Sets the state of the is_mutable member.
"""
resty = types.void
sig = resty(l, is_mutable)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ir.VoidType(),
[ll_list_type, cgutils.intp_t],
)
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_set_is_mutable')
[l, i] = args
[tl, ti] = sig.args
lp = _container_get_data(context, builder, tl, l)
builder.call(fn, [lp, i])
return sig, codegen
@intrinsic
def _list_append(typingctx, l, item):
"""Wrap numba_list_append
"""
resty = types.int32
sig = resty(l, l.item_type)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ll_status,
[ll_list_type, ll_bytes],
)
[l, item] = args
[tl, titem] = sig.args
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_append')
dm_item = context.data_model_manager[titem]
data_item = dm_item.as_data(builder, item)
ptr_item = cgutils.alloca_once_value(builder, data_item)
lp = _container_get_data(context, builder, tl, l)
status = builder.call(
fn,
[
lp,
_as_bytes(builder, ptr_item),
],
)
return status
return sig, codegen
@overload_method(types.ListType, 'append')
def impl_append(l, item):
if not isinstance(l, types.ListType):
return
itemty = l.item_type
def impl(l, item):
casteditem = _cast(item, itemty)
status = _list_append(l, casteditem)
if status == ListStatus.LIST_OK:
return
elif status == ListStatus.LIST_ERR_IMMUTABLE:
raise ValueError('list is immutable')
elif status == ListStatus.LIST_ERR_NO_MEMORY:
raise MemoryError('Unable to allocate memory to append item')
else:
raise RuntimeError('list.append failed unexpectedly')
if l.is_precise():
# Handle the precise case.
return impl
else:
# Handle the imprecise case.
l = l.refine(item)
# Re-bind the item type to match the arguments.
itemty = l.item_type
# Create the signature that we wanted this impl to have.
sig = typing.signature(types.void, l, itemty)
return sig, impl
@intrinsic
def fix_index(tyctx, list_ty, index_ty):
sig = types.intp(list_ty, index_ty)
def codegen(context, builder, sig, args):
[list_ty, index_ty] = sig.args
[ll_list, ll_idx] = args
is_negative = builder.icmp_signed('<', ll_idx,
ir.Constant(ll_idx.type, 0))
fast_len_sig, length_fn = _list_length._defn(context.typing_context,
list_ty)
length = length_fn(context, builder, fast_len_sig, (ll_list,))
# length is an intp
# index can be any sort of int
# indexing in general is done with a ssize_t which correlates to an
# intp. In llvmlite sext and trunc are guarded to return the value
# itself if the types are the same, so there's no need to handle the
# "equal widths" case separately. This sexts/truncs the index to the
# length type such that `add` works for the wraparound case.
st = 'sext' if ll_idx.type.width < length.type.width else 'trunc'
op = getattr(builder, st)
fixedup_idx = op(ll_idx, length.type)
wrapped_index = builder.add(fixedup_idx, length)
return builder.select(is_negative, wrapped_index, fixedup_idx)
return sig, codegen
@register_jitable
def handle_index(l, index):
"""Handle index.
If the index is negative, convert it. If the index is out of range, raise
an IndexError.
"""
# convert negative indices to positive ones
index = fix_index(l, index)
# check that the index is in range
if index < 0 or index >= len(l):
raise IndexError("list index out of range")
return index
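# Worked trace of handle_index (values are illustrative): for a list of
# length 5, fix_index maps -1 to -1 + 5 == 4, which passes the bounds check
# and is returned; an index of 7 is left unchanged by fix_index and then
# fails `index >= len(l)`, raising IndexError("list index out of range").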
@register_jitable
def handle_slice(l, s):
"""Handle slice.
Convert a slice object for a given list into a range object that can be
used to index the list. Many subtle caveats here, especially if the step is
negative.
"""
if len(l) == 0: # ignore slice for empty list
return range(0)
ll, sa, so, se = len(l), s.start, s.stop, s.step
if se > 0:
start = max(ll + sa, 0) if s.start < 0 else min(ll, sa)
stop = max(ll + so, 0) if so < 0 else min(ll, so)
elif se < 0:
start = max(ll + sa, -1) if s.start < 0 else min(ll - 1, sa)
stop = max(ll + so, -1) if so < 0 else min(ll, so)
else:
# should be caught earlier, but isn't, so we raise here
raise ValueError("slice step cannot be zero")
return range(start, stop, s.step)
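# Worked trace of handle_slice (values are illustrative): for a list of
# length 5 and step +1, slice(1, 10, 1) clamps to range(1, 5, 1), while
# slice(-2, 5, 1) converts the negative start to 5 - 2 == 3 and yields
# range(3, 5, 1). The slice-based getitem/setitem/delitem overloads below
# then walk the returned range instead of the raw slice.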
def _gen_getitem(borrowed):
@intrinsic
def impl(typingctx, l_ty, index_ty):
is_none = isinstance(l_ty.item_type, types.NoneType)
if is_none:
resty = types.Tuple([types.int32, l_ty.item_type])
else:
resty = types.Tuple([types.int32, types.Optional(l_ty.item_type)])
sig = resty(l_ty, index_ty)
def codegen(context, builder, sig, args):
[tl, tindex] = sig.args
[l, index] = args
fnty = ir.FunctionType(
ll_voidptr_type,
[ll_list_type],
)
fname = 'numba_list_base_ptr'
fn = cgutils.get_or_insert_function(builder.module, fnty, fname)
fn.attributes.add('alwaysinline')
fn.attributes.add('nounwind')
fn.attributes.add('readonly')
lp = _container_get_data(context, builder, tl, l)
base_ptr = builder.call(
fn,
[lp,],
)
llty = context.get_data_type(tl.item_type)
casted_base_ptr = builder.bitcast(base_ptr, llty.as_pointer())
item_ptr = cgutils.gep(builder, casted_base_ptr, index)
if is_none:
out = builder.load(item_ptr)
else:
out = context.make_optional_none(builder, tl.item_type)
pout = cgutils.alloca_once_value(builder, out)
dm_item = context.data_model_manager[tl.item_type]
item = dm_item.load_from_data_pointer(builder, item_ptr)
if not borrowed:
context.nrt.incref(builder, tl.item_type, item)
if is_none:
loaded = item
else:
loaded = context.make_optional_value(builder, tl.item_type,
item)
builder.store(loaded, pout)
out = builder.load(pout)
return context.make_tuple(builder, resty, [ll_status(0), out])
return sig, codegen
return impl
_list_getitem = _gen_getitem(False)
_list_getitem_borrowed = _gen_getitem(True)
@overload(operator.getitem)
def impl_getitem(l, index):
if not isinstance(l, types.ListType):
return
indexty = INDEXTY
itemty = l.item_type
IS_NOT_NONE = not isinstance(l.item_type, types.NoneType)
if index in index_types:
if IS_NOT_NONE:
def integer_non_none_impl(l, index):
index = handle_index(l, index)
castedindex = _cast(index, indexty)
status, item = _list_getitem(l, castedindex)
if status == ListStatus.LIST_OK:
return _nonoptional(item)
else:
raise AssertionError("internal list error during getitem")
return integer_non_none_impl
else:
def integer_none_impl(l, index):
index = handle_index(l, index)
return None
return integer_none_impl
elif isinstance(index, types.SliceType):
def slice_impl(l, index):
newl = new_list(itemty)
for i in handle_slice(l, index):
newl.append(l[i])
return newl
return slice_impl
else:
raise TypingError("list indices must be integers or slices")
@intrinsic
def _list_setitem(typingctx, l, index, item):
"""Wrap numba_list_setitem
"""
resty = types.int32
sig = resty(l, index, item)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ll_status,
[ll_list_type, ll_ssize_t, ll_bytes],
)
[l, index, item] = args
[tl, tindex, titem] = sig.args
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_setitem')
dm_item = context.data_model_manager[titem]
data_item = dm_item.as_data(builder, item)
ptr_item = cgutils.alloca_once_value(builder, data_item)
lp = _container_get_data(context, builder, tl, l)
status = builder.call(
fn,
[
lp,
index,
_as_bytes(builder, ptr_item),
],
)
return status
return sig, codegen
@overload(operator.setitem)
def impl_setitem(l, index, item):
if not isinstance(l, types.ListType):
return
indexty = INDEXTY
itemty = l.item_type
if index in index_types:
def impl_integer(l, index, item):
index = handle_index(l, index)
castedindex = _cast(index, indexty)
casteditem = _cast(item, itemty)
status = _list_setitem(l, castedindex, casteditem)
if status == ListStatus.LIST_OK:
return
elif status == ListStatus.LIST_ERR_IMMUTABLE:
raise ValueError("list is immutable")
else:
                raise AssertionError("internal list error during setitem")
return impl_integer
elif isinstance(index, types.SliceType):
if not isinstance(item, types.IterableType):
raise TypingError("can only assign an iterable when using a slice "
"with assignment/setitem")
def impl_slice(l, index, item):
if not l._is_mutable():
raise ValueError("list is immutable")
# special case "a[i:j] = a", need to copy first
if l is item:
item = item.copy()
slice_range = handle_slice(l, index)
# non-extended (simple) slices
if slice_range.step == 1:
# replace
if len(item) == len(slice_range):
for i, j in zip(slice_range, item):
l[i] = j
# replace and insert
if len(item) > len(slice_range):
# do the replaces we can
for i, j in zip(slice_range, item[:len(slice_range)]):
l[i] = j
# insert the remaining ones
insert_range = range(slice_range.stop,
slice_range.stop +
len(item) - len(slice_range))
for i, k in zip(insert_range, item[len(slice_range):]):
# FIXME: This may be slow. Each insert can incur a
# memory copy of one or more items.
l.insert(i, k)
# replace and delete
if len(item) < len(slice_range):
# do the replaces we can
replace_range = range(slice_range.start,
slice_range.start + len(item))
for i,j in zip(replace_range, item):
l[i] = j
# delete remaining ones
del l[slice_range.start + len(item):slice_range.stop]
# Extended slices
else:
if len(slice_range) != len(item):
raise ValueError("length mismatch for extended slice "
"and sequence")
# extended slice can only replace
for i, j in zip(slice_range, item):
l[i] = j
return impl_slice
else:
raise TypingError("list indices must be integers or slices")
@overload_method(types.ListType, 'pop')
def impl_pop(l, index=-1):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'pop')
indexty = INDEXTY
    # FIXME: this type check works, but it isn't clear why, nor whether it is optimal
if (isinstance(index, int)
or index in index_types
or isinstance(index, types.Omitted)):
def impl(l, index=-1):
if len(l) == 0:
raise IndexError("pop from empty list")
cindex = _cast(handle_index(l, index), indexty)
item = l[cindex]
del l[cindex]
return item
return impl
else:
raise TypingError("argument for pop must be an integer")
@intrinsic
def _list_delitem(typingctx, l, index):
resty = types.int32
sig = resty(l, index)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ll_status,
[ll_list_type, ll_ssize_t],
)
[tl, tindex] = sig.args
[l, index] = args
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_delitem')
lp = _container_get_data(context, builder, tl, l)
status = builder.call(fn, [lp, index])
return status
return sig, codegen
@intrinsic
def _list_delete_slice(typingctx, l, start, stop, step):
"""Wrap numba_list_delete_slice
"""
resty = types.int32
sig = resty(l, start, stop, step)
def codegen(context, builder, sig, args):
fnty = ir.FunctionType(
ll_status,
[ll_list_type, ll_ssize_t, ll_ssize_t, ll_ssize_t],
)
[l, start, stop, step] = args
[tl, tstart, tstop, tstep] = sig.args
fn = cgutils.get_or_insert_function(builder.module, fnty,
'numba_list_delete_slice')
lp = _container_get_data(context, builder, tl, l)
status = builder.call(
fn,
[
lp,
start,
stop,
step,
],
)
return status
return sig, codegen
@overload(operator.delitem)
def impl_delitem(l, index):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'delitem')
if index in index_types:
def integer_impl(l, index):
cindex = _cast(handle_index(l, index), INDEXTY)
status = _list_delitem(l, cindex)
if status == ListStatus.LIST_OK:
return
elif status == ListStatus.LIST_ERR_IMMUTABLE:
raise ValueError("list is immutable")
else:
raise AssertionError("internal list error during delitem")
return integer_impl
elif isinstance(index, types.SliceType):
def slice_impl(l, index):
slice_range = handle_slice(l, index)
status = _list_delete_slice(
l,
slice_range.start,
slice_range.stop,
slice_range.step)
if status == ListStatus.LIST_ERR_MUTATED:
raise ValueError("list is immutable")
return slice_impl
else:
raise TypingError("list indices must be integers or slices")
@overload(operator.contains)
def impl_contains(l, item):
if not isinstance(l, types.ListType):
return
itemty = l.item_type
_check_for_none_typed(l, "__contains__")
def impl(l, item):
casteditem = _cast(item, itemty)
for i in l:
if i == casteditem:
return True
else:
return False
return impl
@overload_method(types.ListType, 'count')
def impl_count(l, item):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'count')
itemty = l.item_type
def impl(l, item):
casteditem = _cast(item, itemty)
total = 0
for i in l:
if i == casteditem:
total += 1
return total
return impl
@overload_method(types.ListType, 'extend')
def impl_extend(l, iterable):
if not isinstance(l, types.ListType):
return
if not isinstance(iterable, types.IterableType):
raise TypingError("extend argument must be iterable")
_check_for_none_typed(l, 'extend')
def select_impl():
if isinstance(iterable, types.ListType):
def impl(l, iterable):
if not l._is_mutable():
raise ValueError("list is immutable")
# guard against l.extend(l)
if l is iterable:
iterable = iterable.copy()
for i in iterable:
l.append(i)
return impl
else:
def impl(l, iterable):
for i in iterable:
l.append(i)
return impl
if l.is_precise():
# Handle the precise case.
return select_impl()
else:
# Handle the imprecise case, try to 'guess' the underlying type of the
# values in the iterable.
if hasattr(iterable, "dtype"): # tuples and arrays
ty = iterable.dtype
elif hasattr(iterable, "item_type"): # lists
ty = iterable.item_type
elif hasattr(iterable, "yield_type"): # iterators and generators
ty = iterable.yield_type
elif isinstance(iterable, types.UnicodeType):
ty = iterable
else:
raise TypingError("unable to extend list, iterable is missing "
"either *dtype*, *item_type* or *yield_type*.")
l = l.refine(ty)
# Create the signature that we wanted this impl to have
sig = typing.signature(types.void, l, iterable)
return sig, select_impl()
@overload_method(types.ListType, 'insert')
def impl_insert(l, index, item):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'insert')
# insert can refine
if isinstance(item, NoneType):
raise TypingError("method support for List[None] is limited")
if index in index_types:
def impl(l, index, item):
# If the index is larger than the size of the list or if the list is
# empty, just append.
if index >= len(l) or len(l) == 0:
l.append(item)
# Else, do the insert dance
else:
# convert negative indices
if index < 0:
# if the index is still negative after conversion, use 0
index = max(len(l) + index, 0)
# grow the list by one, make room for item to insert
l.append(l[0])
# reverse iterate over the list and shift all elements
i = len(l) - 1
                while i > index:
l[i] = l[i - 1]
i -= 1
# finally, insert the item
l[index] = item
if l.is_precise():
# Handle the precise case.
return impl
else:
# Handle the imprecise case
l = l.refine(item)
# Re-bind the item type to match the arguments.
itemty = l.item_type
# Create the signature that we wanted this impl to have.
sig = typing.signature(types.void, l, INDEXTY, itemty)
return sig, impl
else:
raise TypingError("list insert indices must be integers")
@overload_method(types.ListType, 'remove')
def impl_remove(l, item):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'remove')
itemty = l.item_type
def impl(l, item):
casteditem = _cast(item, itemty)
for i, n in enumerate(l):
if casteditem == n:
del l[i]
return
else:
raise ValueError("list.remove(x): x not in list")
return impl
@overload_method(types.ListType, 'clear')
def impl_clear(l):
if not isinstance(l, types.ListType):
return
def impl(l):
while len(l):
del l[-1]
return impl
@overload_method(types.ListType, 'reverse')
def impl_reverse(l):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'reverse')
def impl(l):
if not l._is_mutable():
raise ValueError("list is immutable")
front = 0
back = len(l) - 1
while front < back:
l[front], l[back] = l[back], l[front]
front += 1
back -= 1
return impl
@overload_method(types.ListType, 'copy')
def impl_copy(l):
_check_for_none_typed(l, 'copy')
itemty = l.item_type
if isinstance(l, types.ListType):
def impl(l):
newl = new_list(itemty, len(l))
for i in l:
newl.append(i)
return newl
return impl
@overload_method(types.ListType, 'index')
def impl_index(l, item, start=None, end=None):
if not isinstance(l, types.ListType):
return
_check_for_none_typed(l, 'index')
itemty = l.item_type
def check_arg(arg, name):
if not (arg is None
or arg in index_types
or isinstance(arg, (types.Omitted, types.NoneType))):
raise TypingError("{} argument for index must be an integer"
.format(name))
check_arg(start, "start")
check_arg(end, "end")
def impl(l, item, start=None, end=None):
casteditem = _cast(item, itemty)
for i in handle_slice(l, slice(start, end, 1)):
if l[i] == casteditem:
return i
else:
raise ValueError("item not in list")
return impl
@overload_method(types.ListType, "sort")
def ol_list_sort(lst, key=None, reverse=False):
# The following is mostly borrowed from listobj.ol_list_sort
from numba.typed import List
listobj._sort_check_key(key)
listobj._sort_check_reverse(reverse)
if cgutils.is_nonelike(key):
KEY = False
sort_f = listobj.sort_forwards
sort_b = listobj.sort_backwards
elif isinstance(key, types.Dispatcher):
KEY = True
sort_f = listobj.arg_sort_forwards
sort_b = listobj.arg_sort_backwards
def impl(lst, key=None, reverse=False):
if not lst._is_mutable():
raise ValueError("list is immutable")
if KEY is True:
# There's an unknown refct problem in reflected list.
# Using an explicit loop with typedlist somehow "fixed" it.
_lst = List()
for x in lst:
_lst.append(key(x))
else:
_lst = lst
if reverse is False or reverse == 0:
tmp = sort_f(_lst)
else:
tmp = sort_b(_lst)
if KEY is True:
# There's an unknown refct problem in reflected list.
# Using an explicit loop with typedlist somehow "fixed" it.
ordered = List()
for i in tmp:
ordered.append(lst[i])
lst[:] = ordered
return impl
@overload_method(types.ListType, "getitem_unchecked")
def ol_getitem_unchecked(lst, index):
if not isinstance(index, types.Integer):
return
def impl(lst, index):
index = fix_index(lst, index)
castedindex = _cast(index, types.intp)
_, item = _list_getitem(lst, castedindex)
return _nonoptional(item)
return impl
@overload_attribute(types.ListType, '_dtype')
def impl_dtype(l):
if not isinstance(l, types.ListType):
return
dt = l.dtype
def impl(l):
return dt
return impl
def _equals_helper(this, other, OP):
if not isinstance(this, types.ListType):
return
if not isinstance(other, types.ListType):
return lambda this, other: False
this_is_none = isinstance(this.dtype, types.NoneType)
other_is_none = isinstance(other.dtype, types.NoneType)
if this_is_none or other_is_none:
def impl_some_none(this, other):
def equals(this, other):
# Equal if both none-typed and have equal length
return bool(this_is_none == other_is_none
and len(this) == len(other))
return OP(equals(this, other))
return impl_some_none
else:
def impl_not_none(this, other):
def equals(this, other):
if len(this) != len(other):
return False
for i in range(len(this)):
if this[i] != other[i]:
return False
else:
return True
return OP(equals(this, other))
return impl_not_none
@overload(operator.eq)
def impl_equals(this, other):
return _equals_helper(this, other, operator.truth)
@overload(operator.ne)
def impl_not_equals(this, other):
return _equals_helper(this, other, operator.not_)
@register_jitable
def compare_not_none(this, other):
"""Oldschool (python 2.x) cmp.
if this < other return -1
if this = other return 0
if this > other return 1
"""
if len(this) != len(other):
return -1 if len(this) < len(other) else 1
for i in range(len(this)):
this_item, other_item = this[i], other[i]
if this_item != other_item:
return -1 if this_item < other_item else 1
else:
return 0
@register_jitable
def compare_some_none(this, other, this_is_none, other_is_none):
"""Oldschool (python 2.x) cmp for None typed lists.
if this < other return -1
if this = other return 0
if this > other return 1
"""
if len(this) != len(other):
return -1 if len(this) < len(other) else 1
if this_is_none and other_is_none: # both none
return 0
# to get here there is precisely one none, and if the first is none, by
# induction, the second cannot be
return -1 if this_is_none else 1
def compare_helper(this, other, accepted):
if not isinstance(this, types.ListType):
return
if not isinstance(other, types.ListType):
return lambda this, other: False
this_is_none = isinstance(this.dtype, types.NoneType)
other_is_none = isinstance(other.dtype, types.NoneType)
if this_is_none or other_is_none:
def impl(this, other):
return compare_some_none(
this, other, this_is_none, other_is_none) in accepted
else:
def impl(this, other):
return compare_not_none(this, other) in accepted
return impl
@overload(operator.lt)
def impl_less_than(this, other):
return compare_helper(this, other, (-1, ))
@overload(operator.le)
def impl_less_than_or_equal(this, other):
return compare_helper(this, other, (-1, 0))
@overload(operator.gt)
def impl_greater_than(this, other):
return compare_helper(this, other, (1,))
@overload(operator.ge)
def impl_greater_than_or_equal(this, other):
return compare_helper(this, other, (0, 1))
class ListIterInstance(object):
def __init__(self, context, builder, iter_type, iter_val):
self._context = context
self._builder = builder
self._iter_ty = iter_type
self._list_ty = self._iter_ty.parent
self._iter = context.make_helper(builder, iter_type, iter_val)
@classmethod
def from_list(cls, context, builder, iter_type, list_val):
self = cls(context, builder, iter_type, None)
index = context.get_constant(types.intp, 0)
self._iter.index = cgutils.alloca_once_value(builder, index)
self._iter.parent = list_val
self._iter.size = cls._size_of_list(context, builder, self._list_ty,
self._iter.parent)
return self
@classmethod
def _size_of_list(cls, context, builder, list_ty, ll_list):
tyctx = context.typing_context
fnty = tyctx.resolve_value_type(len)
sig = fnty.get_call_type(tyctx, (list_ty,), {})
impl = context.get_function(fnty, sig)
return impl(builder, (ll_list,))
@property
def size(self):
tyctx = self._context.typing_context
fnty = tyctx.resolve_value_type(len)
ty = self._list_ty
sig = fnty.get_call_type(tyctx, (ty,), {})
impl = self._context.get_function(fnty, sig)
return impl(self._builder, (self._iter.parent,))
@property
def value(self):
return self._iter._getvalue()
def getitem(self, index):
tyctx = self._context.typing_context
ty = self._list_ty
sig, fn = _list_getitem_borrowed._defn(tyctx, ty, types.intp)
statnitem = fn(self._context, self._builder, sig, (self._iter.parent,
index))
_, item = cgutils.unpack_tuple(self._builder, statnitem)
retty = sig.return_type[1]
if isinstance(self._list_ty.dtype, types.NoneType):
raw_ty = self._list_ty.dtype
else:
raw_ty = retty.type
raw_item = self._context.cast(self._builder, item, retty, raw_ty)
return raw_item
@property
def index(self):
return self._builder.load(self._iter.index)
@index.setter
def index(self, value):
self._builder.store(value, self._iter.index)
@lower_builtin('getiter', types.ListType)
def getiter_list(context, builder, sig, args):
inst = ListIterInstance.from_list(context, builder, sig.return_type,
args[0])
return impl_ret_borrowed(context, builder, sig.return_type, inst.value)
@lower_builtin('iternext', types.ListTypeIteratorType)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
inst = ListIterInstance(context, builder, sig.args[0], args[0])
index = inst.index
nitems = inst.size # this is current size
init_size = inst._iter.size # this is initial size
# if the current count is different to the initial count, bail, list is
# being mutated whilst iterated.
is_mutated = builder.icmp_signed('!=', init_size, nitems)
with builder.if_then(is_mutated, likely=False):
context.call_conv.return_user_exc(
builder, RuntimeError, ("list was mutated during iteration",))
is_valid = builder.icmp_signed('<', index, nitems)
result.set_valid(is_valid)
with builder.if_then(is_valid):
result.yield_(inst.getitem(index))
inst.index = builder.add(index, context.get_constant(types.intp, 1))
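# Hedged usage sketch: assuming the numba.typed.List front end that sits on
# top of these lowerings (as in upstream Numba), the size comparison above
# makes the following illustrative jitted function raise
# RuntimeError("list was mutated during iteration"):
#
#     from numba import njit
#     from numba.typed import List
#
#     @njit
#     def bad(lst):
#         for x in lst:
#             lst.append(x)  # size now differs from the snapshot taken at getiter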
|
|
#
# Copyright 2015-2019 University of Southern California
# Distributed under the Apache License, Version 2.0. See LICENSE for more info.
#
"""Filesystem-backed object bulk storage for Hatrac.
This module handles only low-level byte storage. Object and
object-version lifecycle and authorization is handled by the caller.
"""
import os
import hashlib
import base64
import binascii
import random
import struct
import io
from ...core import BadRequest, Conflict, coalesce
def make_file(dirname, relname, accessmode):
"""Create and open file with accessmode, including missing parents.
Returns fp.
"""
# TODO: test for conflicts during creation?
filename = "%s/%s" % (dirname, relname)
if not os.path.exists(dirname):
os.makedirs(dirname, mode=0o755)
return open(filename, accessmode, 0)
class HatracStorage (object):
"""Implement HatracStorage API using basic POSIX filesystem mapping.
A configured storage rootdir, object name, and object version
are combined to form one filename to store the immutable
object:
/ rootdir / object_name : object_version
consistent with Hatrac rules. The incoming name may include
RFC3986 percent-encoded URL characters, which we assume our
filesystem can tolerate.
"""
track_chunks = False
_bufsize = 1024**2
def __init__(self, config):
self.root = config.get('storage_path', '/var/www/hatrac')
def _dirname_relname(self, name, version):
"""Map Hatrac identifiers to backend storage."""
# TODO: consider hashing if too many namespaces exist at top level
assert name
assert version
assert ':' not in version
dirname = self.root
nameparts = [ n for n in name.split('/') if n ]
dirparts = nameparts[0:-1]
relpart = nameparts[-1]
relname = "%s:%s" % (relpart, version)
assert relpart
if dirparts:
dirname = "%s/%s" % (self.root, "/".join(dirparts))
else:
dirname = self.root
return (dirname, relname)
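    # Worked example of the mapping above (illustrative values): with
    # root '/var/www/hatrac', name 'foo/bar/baz', and version 'V1', this
    # returns ('/var/www/hatrac/foo/bar', 'baz:V1'), i.e. the object is
    # stored at /var/www/hatrac/foo/bar/baz:V1.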
def create_from_file(self, name, input, nbytes, metadata={}):
"""Create an entire file-version object from input content, returning version ID."""
version = base64.b32encode(
(struct.pack('Q', random.getrandbits(64))
+ struct.pack('Q', random.getrandbits(64)))[0:26]
).decode().replace('=', '') # strip off '=' padding
dirname, relname = self._dirname_relname(name, version)
f = make_file(dirname, relname, 'wb')
# upload whole content at offset 0 (for code reuse)
self.upload_chunk_from_file(None, None, 0, 0, input, nbytes, metadata, f)
return version
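    # Note on the version format: the two struct.pack('Q', ...) calls yield
    # 16 random bytes (128 bits), so b32encode produces 26 data characters
    # plus 6 '=' padding characters; after stripping the padding, a version
    # ID is a 26-character string over the base32 alphabet (A-Z, 2-7).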
def create_upload(self, name, nbytes=None, metadata={}):
upload_id = self.create_from_file(name, io.BytesIO(b''), 0)
return upload_id
def cancel_upload(self, name, upload_id):
# this backend uses upload_id as version_id
self.delete(name, upload_id)
return None
def finalize_upload(self, name, upload_id, chunk_data, metadata={}):
# nothing changes in storage for this backend strategy
version_id = upload_id
assert chunk_data is None
# aggressively validate uploaded content against pre-defined MD5 if it was given at job start
if 'content-md5' in metadata:
dirname, relname = self._dirname_relname(name, version_id)
fullname = "%s/%s" % (dirname, relname)
f = open(fullname, "rb")
hasher = hashlib.md5()
eof = False
while not eof:
buf = f.read(self._bufsize)
if len(buf) != 0:
hasher.update(buf)
else:
eof = True
stored_md5 = hasher.digest()
if metadata['content-md5'] != stored_md5:
raise Conflict(
'Current uploaded content MD5 %s does not match expected %s.'
% (binascii.hexlify(stored_md5), binascii.hexlify(metadata['content-md5']))
)
return version_id
def upload_chunk_from_file(self, name, version, position, chunksize, input, nbytes, metadata={}, f=None):
"""Save chunk data into storage.
If self.track_chunks, return value must be None or a value
that can be serialized using webauthn2.util.jsonWriteRaw,
i.e. dict, array, or scalar values.
"""
if f is None:
dirname, relname = self._dirname_relname(name, version)
fullname = "%s/%s" % (dirname, relname)
f = open(fullname, "r+b")
f.seek(position*chunksize)
if 'content-md5' in metadata:
hasher = hashlib.md5()
else:
hasher = None
rbytes = 0
eof = False
while not eof:
if nbytes is not None:
bufsize = min(nbytes-rbytes, self._bufsize)
else:
bufsize = self._bufsize
buf = input.read(bufsize)
f.write(buf)
bufsize = len(buf)
rbytes += bufsize
if hasher:
hasher.update(buf)
if nbytes is not None:
if rbytes >= nbytes:
eof = True
elif bufsize == 0:
f.close()
raise BadRequest('Only received %s of %s expected bytes.' % (rbytes, nbytes))
elif bufsize == 0:
eof = True
if hasher:
received_md5 = hasher.digest()
if metadata['content-md5'] != received_md5:
raise BadRequest(
'Received content MD5 %r does not match expected %r.'
% (received_md5, metadata['content-md5'])
#% (binascii.hexlify(received_md5), binascii.hexlify(metadata['content-md5'].encode()))
)
return "test"
def get_content(self, name, version, metadata={}, aux={}):
return self.get_content_range(name, version, metadata, aux=aux)
def get_content_range(self, name, version, metadata={}, get_slice=None, aux={}):
"""Return (nbytes, metadata, data_iterator) tuple for existing file-version object."""
dirname, relname = self._dirname_relname(name, version)
fullname = "%s/%s" % (dirname, relname)
nbytes = os.path.getsize(fullname)
if get_slice is not None:
pos = coalesce(get_slice.start, 0)
limit = coalesce(get_slice.stop, nbytes)
else:
pos = 0
limit = nbytes
if pos != 0 or limit != nbytes:
# most object metadata does not apply to partial read content
metadata = {
k: v
for k, v in metadata.items()
if k in {'content-type'}
}
length = limit - pos
def helper():
if 'content-md5' in metadata:
hasher = hashlib.md5()
else:
hasher = None
rpos = pos
eof = False
with open(fullname, 'rb') as f:
f.seek(rpos)
while not eof:
buf = f.read(min(limit-rpos, self._bufsize))
buflen = len(buf)
rpos += buflen
if hasher:
hasher.update(buf)
if rpos >= (limit-1):
eof = True
elif buflen == 0:
raise IOError('Read truncated at %s when %s expected.' % (rpos, limit))
if eof and hasher:
retrieved_md5 = hasher.digest()
if metadata['content-md5'] != retrieved_md5:
raise IOError(
'Retrieved content MD5 %s does not match expected %s.'
% (binascii.hexlify(retrieved_md5), binascii.hexlify(metadata['content-md5']))
)
yield buf
return (length, metadata, helper())
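    # Hedged usage sketch (`storage`, `out`, and the slice bounds below are
    # illustrative, not defined in this module):
    #
    #     nbytes, metadata, chunks = storage.get_content_range(
    #         'foo/bar/baz', version_id, metadata, get_slice=slice(0, 1024))
    #     for buf in chunks:
    #         out.write(buf)
    #
    # For a partial read, only 'content-type' survives in the returned
    # metadata, since e.g. a whole-object content-md5 does not describe the
    # sliced payload.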
def delete(self, name, version, aux={}):
"""Delete object version."""
dirname, relname = self._dirname_relname(name, version)
fullname = "%s/%s" % (dirname, relname)
os.remove(fullname)
def delete_namespace(self, name):
"""Tidy up after an empty namespace that has been deleted."""
dirname, relname = self._dirname_relname(name, 'dummy')
try:
os.removedirs(dirname)
except OSError:
pass
|
|
import numpy as np
import argparse
from tqdm import tqdm
import json
class QualAnalysis(object):
def __init__(self):
self.kb_facts = self.read_kb_facts(kb_file) if use_kb else None
self.text_kb_facts = self.read_text_kb_facts(text_kb_file) if use_text else None
self.questions = self.read_questions(input_test_file)
        # NOTE: read_attn_wts_file expects self.mid_to_word_map for MID-to-word
        # translation; uncomment the two lines below to enable it.
        # print('Reading mid to word map')
        # self.mid_to_word_map = self.mid_to_word()
def read_kb_facts(self, input_file):
facts = []
print('Reading kb file at {}'.format(input_file))
with open(input_file) as fb:
for line in tqdm(fb):
line = line.strip()
line = line[1:-1]
e1, r1, r2, e2 = [a.strip('"') for a in [x.strip() for x in line.split(',')]]
r = r1 + '_' + r2
facts.append({'e1': e1, 'r': r, 'e2': e2})
return facts
def read_text_kb_facts(self, input_file):
facts = []
print('Reading text kb file at {}'.format(input_file))
with open(input_file) as fin:
for counter, line in tqdm(enumerate(fin)):
kb_instance = json.loads(line)
facts.append(kb_instance)
return facts
def read_questions(self, input_file):
questions = []
print('Reading file at {}'.format(input_file))
with open(input_file) as f_in:
for counter, line in tqdm(enumerate(f_in)):
question = json.loads(line)
questions.append(question)
return questions
def get_relevant_memory(self, question_index, mem_index, use_kb=True):
"""
        Get the relevant memory entry, from either the KB or the text memory. Note that only one of
        use_kb and use_text applies per call; if both are enabled, this needs to be called twice,
        once with each value of use_kb (True, False), and the returned values handled appropriately.
:param question_index:
:param mem_index:
:param use_kb:
:return:
"""
question = self.questions[question_index]
mem_index += 1 # convert from 0 index
start_index_key = 'start_indices' if use_kb else 'text_kb_start_indices'
length_key = 'lengths' if use_kb else 'text_kb_lengths'
memory = self.kb_facts if use_kb else self.text_kb_facts
start_indices = question[start_index_key]
lengths = question[length_key]
q_start_indices = np.asarray(start_indices)
q_fact_lengths = np.asarray(lengths)
sorted_index = np.argsort(q_fact_lengths)
q_fact_lengths = q_fact_lengths[sorted_index]
q_start_indices = q_start_indices[sorted_index]
cum_num_mem_slots = 0
counter = 0
for fact_len in q_fact_lengths:
if cum_num_mem_slots + fact_len >= mem_index: # the mem lies in the next partition
# calculate the off set
offset = mem_index - cum_num_mem_slots - 1 # -1 because converting to zero index
return memory[q_start_indices[counter] + offset]
else:
cum_num_mem_slots += fact_len
counter += 1
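    # Worked trace (illustrative numbers): suppose a question has
    # start_indices == [100, 40] and lengths == [3, 2]. Sorting by length
    # gives lengths [2, 3] and starts [40, 100]. For mem_index == 3 (0-based
    # argument, so 4 after the += 1), the first partition covers memory
    # slots 1-2, so the entry lies in the second partition: offset ==
    # 4 - 2 - 1 == 1 and the returned fact is memory[100 + 1].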
def read_attn_wts_file(self, input_file, input_predicted_answer_file):
f_out = open(output_dir+'/attn_memory.txt','a')
f_out_correct = open(output_dir + '/attn_memory.txt.correct', 'a')
        print('Loading the attn weights...')
attn_wts = np.load(input_file)
print('Loading predicted answer file')
num_questions = len(self.questions)
answers = np.fromfile(input_predicted_answer_file)
answers = answers.reshape(num_questions, -1)
assert attn_wts.ndim == 2
num_data, max_mem_slots = attn_wts.shape
# get the index
print('Sorting....')
sorted_index = np.argsort(attn_wts, axis=1)
sorted_wts = np.sort(attn_wts, axis=1)
print('done...')
# get the slice we are interested in
start_index = max_mem_slots - topk
sorted_index = sorted_index[:, start_index:]
sorted_wts = sorted_wts[:, start_index:]
for data_index in range(num_data): # refactor the double loop
sentence = self.questions[data_index]['sentence']
split_sentence = sentence.split(' ')
entities = self.questions[data_index]['entities']
for entity in entities:
split_sentence[entity['index']] = entity['entity']
sentence_with_entities = ' '.join(split_sentence)
is_correct = (answers[data_index][1] == answers[data_index][0])
f_out.write('Sentence: {}\n'.format(sentence))
f_out.write('Sentence with entities: {}\n'.format(sentence_with_entities))
f_out.write('Correct Answer: {}\n'.format(rev_entity_vocab[int(answers[data_index][1])]))
f_out.write('Predicted Answer: {}\n'.format(rev_entity_vocab[int(answers[data_index][0])]))
f_out.write('Memories\n')
if is_correct:
f_out_correct.write('Sentence: {}\n'.format(sentence))
f_out_correct.write('Sentence with entities: {}\n'.format(sentence_with_entities))
f_out_correct.write('Correct Answer: {}\n'.format(rev_entity_vocab[int(answers[data_index][1])]))
f_out_correct.write('Predicted Answer: {}\n'.format(rev_entity_vocab[int(answers[data_index][0])]))
f_out_correct.write('Memories\n')
for index in reversed(range(topk)):
mem_index = self.get_relevant_memory(data_index, sorted_index[data_index][index], use_kb=use_kb)
f_out.write('Attn wt: {0:10.4f}\n'.format(sorted_wts[data_index][index]))
f_out.write('Memory: {}\n'.format(mem_index))
if is_correct:
f_out_correct.write('Attn wt: {0:10.4f}\n'.format(sorted_wts[data_index][index]))
f_out_correct.write('Memory: {}\n'.format(mem_index))
if mem_index is not None:
mem_index['value'] = self.mid_to_word_map[mem_index['value']] if mem_index['value'] in self.mid_to_word_map else mem_index['value']
key = [self.mid_to_word_map[word] if word in self.mid_to_word_map else word for word in mem_index['key']]
mem_index['key'] = key
mem_index['entity'] = self.mid_to_word_map[mem_index['entity']] if mem_index['entity'] in self.mid_to_word_map else mem_index['entity']
f_out_correct.write('Memory in words: {}\n'.format(mem_index))
f_out.write('Memory in words: {}\n'.format(mem_index))
f_out.write("=============\n")
if is_correct:
f_out_correct.write("=============\n")
def get_siva_output(self, input_predicted_answer_file):
num_questions = len(self.questions)
        outputs = np.fromfile(input_predicted_answer_file)  # Manzil changed the output structure to store (sentence, prediction), and np.save to np.tofile, so the reading code changed accordingly
# outputs = np.load(input_predicted_answer_file)
outputs = outputs.reshape(num_questions, -1)
num_questions, sequence_length = outputs.shape
predicted_answers = outputs[:,sequence_length-1] #last column
# predicted_answers = outputs[:, 0] # last column
correct_counter = 0
for counter, question in enumerate(self.questions):
print(question['sentence']+'\t'+ '[\"'+question['answerSubset'][0]+'\"]'+'\t'+'[\"'+rev_entity_vocab[predicted_answers[counter]]+'\"]')
if question['answerSubset'][0] == rev_entity_vocab[predicted_answers[counter]]:
correct_counter += 1
print('Accuracy: {}'.format(correct_counter*1.0/num_questions))
# print(question['answerSubset'])
# print(rev_entity_vocab[predicted_answers[counter]])
def __call__(self, *args, **kwargs):
self.read_attn_wts_file(input_attn_file, input_predicted_answer_file)
# self.get_siva_output(input_predicted_answer_file)
print('Done')
def mid_to_word(self):
word_to_mid = {}
mid_to_word = {}
with open('/iesl/canvas/pat/data/freebase/freebase_names', 'r') as f:
for line in tqdm(f):
mid, word, _ = line.split('\t')
word_to_mid[word] = 'm.' + mid[2:]
mid_to_word['m.' + mid[2:]] = word
return mid_to_word
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--use_kb", default=1, type=int)
parser.add_argument("--use_text", default=0, type=int)
parser.add_argument("--kb_file", required=True)
parser.add_argument("--text_kb_file", required=True)
parser.add_argument("--attn_file", required=True)
parser.add_argument("--answer_file", required=True)
parser.add_argument("--input_test_file", required=True)
parser.add_argument("--k", default=5, type=int)
parser.add_argument("--output_dir", required=True)
args = parser.parse_args()
kb_file = args.kb_file
text_kb_file = args.text_kb_file
use_kb = (args.use_kb == 1)
use_text = (args.use_text == 1)
input_attn_file = args.attn_file
input_predicted_answer_file = args.answer_file
input_test_file = args.input_test_file
topk = args.k
output_dir = args.output_dir
vocab_dir = "/home/rajarshi/research/joint-text-and-kb-inference-semantic-parsing/vocab"
print('Reading entity vocab')
entity_vocab = json.load(open(vocab_dir + '/entity_vocab.json'))
rev_entity_vocab = {}
for k,v in entity_vocab.iteritems():
rev_entity_vocab[v] = k
qual_analysis = QualAnalysis()
qual_analysis()
|
|
from __future__ import with_statement
__all__ = ['FriendlyCURL', 'threadCURLSingleton', 'url_parameters',
'CurlHTTPConnection', 'CurlHTTPSConnection', 'CurlHTTPResponse',]
import contextlib
import logging
import os
import os.path
import pickle
import tempfile
import shutil
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import pycurl
from pycurl import error as PyCURLError
from cStringIO import StringIO
import urllib
import urlparse
import mimetools
import httplib
from httplib2 import iri2uri
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
DEFAULT_URI_ENCODING = 'utf'
def url_parameters(base_url, **kwargs):
"""Uses any extra keyword arguments to create a "query string" and
append it to base_url."""
if kwargs:
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = [unicode(e).encode(DEFAULT_URI_ENCODING) for e in v]
else:
kwargs[k] = unicode(v).encode(DEFAULT_URI_ENCODING)
base_url += '?' + urllib.urlencode(kwargs, doseq=True)
return base_url
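# Hedged usage sketch (the URL below is illustrative):
#
#     url_parameters('http://example.com/api', q='foo bar', ids=[1, 2])
#     # -> 'http://example.com/api?q=foo+bar&ids=1&ids=2'
#     #    (keyword order in the query string may vary)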
def debugfunction(curl_info, data):
if curl_info == pycurl.INFOTYPE_TEXT:
log.debug("Info: %r", data)
elif curl_info == pycurl.INFOTYPE_HEADER_IN:
log.debug("Header From Peer: %r", data)
elif curl_info == pycurl.INFOTYPE_HEADER_OUT:
log.debug("Header Sent to Peer: %r", data)
elif curl_info == pycurl.INFOTYPE_DATA_IN:
#log.debug("Data From Peer: %r", data)
pass
elif curl_info == pycurl.INFOTYPE_DATA_OUT:
#log.debug("Data To Peer: %r", data)
pass
return 0
class FriendlyCURL(object):
"""Friendly wrapper for a PyCURL Handle object. You probably don't want to
instantiate this yourself. Instead, use :func:`threadCURLSingleton`."""
def __init__(self):
self.curl_handle = pycurl.Curl()
def _common_perform(self, url, headers,
accept_self_signed_SSL=False,
follow_location=True,
body_buffer=None, debug=False):
"""Perform activities common to all FriendlyCURL operations. Several
parameters are passed through and processed identically for all of the
\*_url functions, and all produce the same return type.
:param url: The URL to access. If a unicode string, it will be treated\
as an IRI and converted to a URI.
:type url: str or unicode
:param headers: Additional headers to add to the request.
:type headers: dict
:param accept_self_signed_SSL: Whether to accept self-signed SSL certs.
:type accept_self_signed_SSL: bool
:param follow_location: If True, FriendlyCURL will follow location\
headers on HTTP redirects. If False, the redirect will be returned.
:type follow_location: bool
:param body_buffer: A buffer to write body content into.
:type body_buffer: ``.write(str)``-able file-like object
:param debug: Turn on debug logging for this request.
:type debug: bool
        :returns: A ``(headers, body)`` tuple: a dictionary of response headers,\
        including the HTTP status as an int under 'status', and a buffer\
        containing the body of the response."""
self.curl_handle.setopt(
pycurl.HTTPHEADER,
['%s: %s' % (header, str(value)) for header, value in headers.iteritems()])
if isinstance(url, unicode):
url = str(iri2uri(url))
self.curl_handle.setopt(pycurl.URL, url)
if body_buffer:
body = body_buffer
else:
body = StringIO()
self.curl_handle.setopt(pycurl.FORBID_REUSE, 1)
self.curl_handle.setopt(pycurl.WRITEFUNCTION, body.write)
header = StringIO()
self.curl_handle.setopt(pycurl.HEADERFUNCTION, header.write)
if accept_self_signed_SSL == True:
self.curl_handle.setopt(pycurl.SSL_VERIFYPEER, 0)
if follow_location == True:
self.curl_handle.setopt(pycurl.FOLLOWLOCATION, 1)
if debug:
self.curl_handle.setopt(pycurl.VERBOSE, 1)
self.curl_handle.setopt(pycurl.DEBUGFUNCTION, debugfunction)
self.curl_handle.perform()
body.seek(0)
headers = [hdr.split(': ') for hdr in header.getvalue().strip().split('\r\n') if
hdr and not hdr.startswith('HTTP/')]
response = dict((header[0].lower(), header[1]) for header in headers)
response['status'] = self.curl_handle.getinfo(pycurl.HTTP_CODE)
return (response, body)
def get_url(self, url, headers = None, use_cache = True, **kwargs):
"""Perform a regular HTTP GET using pycurl. See :meth:`_common_perform`
for details.
:param use_cache: Defaults to true, will use the cache if cache_dir is\
set. Pass false or unset cache_dir to ignore cache and not cache the\
result of the request."""
headers = headers or {}
self.curl_handle.setopt(pycurl.HTTPGET, 1)
cache_base_name = hash(url)
if not (use_cache and hasattr(self, '_cache_dir')):
try:
result = self._common_perform(url, headers, **kwargs)
finally:
self.reset()
return result
response_cache_filename = os.path.join(self.cache_dir,
'%s.response' % cache_base_name)
body_cache_filename = os.path.join(self.cache_dir,
'%s.body' % cache_base_name)
temp_buffer_fd, temp_buffer_path = tempfile.mkstemp()
try:
if 'body_buffer' in kwargs:
body_buffer = kwargs['body_buffer']
del kwargs['body_buffer']
else:
body_buffer = StringIO()
with os.fdopen(temp_buffer_fd, 'w') as temp_buffer:
cached_response = {}
if os.path.exists(response_cache_filename):
with open(response_cache_filename, 'r') as response_cache:
cached_response = pickle.load(response_cache)
if 'etag' in cached_response:
# Retrieved before, do a conditional get.
headers['If-None-Match'] = cached_response['etag']
response, body = self._common_perform(
url, headers, body_buffer=temp_buffer, **kwargs)
if response['status'] == 304:
with open(body_cache_filename, 'r') as cached_body:
shutil.copyfileobj(cached_body, body_buffer)
body_buffer.seek(0)
return cached_response, body_buffer
else:
# Retrieve the resource for the first time.
response, body = self._common_perform(
url, headers, body_buffer=temp_buffer, **kwargs)
with contextlib.nested(open(temp_buffer_path, 'r'),
open(body_cache_filename, 'w'),
open(response_cache_filename, 'w')) as\
(temp_buffer, body_cache, response_cache):
pickle.dump(response, response_cache)
shutil.copyfileobj(temp_buffer, body_cache)
temp_buffer.seek(0)
shutil.copyfileobj(temp_buffer, body_buffer)
body_buffer.seek(0)
return response, body_buffer
finally:
os.unlink(temp_buffer_path)
def head_url(self, url, headers = None, **kwargs):
"""Performs an HTTP HEAD using pycurl. See :meth:_common_perform`
for details."""
headers = headers or {}
self.curl_handle.setopt(pycurl.NOBODY, 1)
try:
result = self._common_perform(url, headers, **kwargs)
finally:
self.reset()
return result
def post_url(self, url, data=None, upload_file=None, upload_file_length=None,
content_type='application/x-www-form-urlencoded',
headers = None, **kwargs):
"""Performs an HTTP POST using pycurl. If ``headers`` is provided, it
will have Content-Type and Content-Length added to it. See
:meth:`_common_perform` for further details.
:param data: The data to use as the POST body. Will over-ride\
``upload_file`` and ``upload_file_length`` if provided.
:type data: str or unicode
:param upload_file: The data to use as the POST body.
:type upload_file: ``.read()``-able file-like object
:param upload_file_length: The length of ``upload_file``. If\
``upload_file`` is provided and this is not, ``friendly_curl`` will use\
``os.fstat`` to calculate it.
:param content_type: The type of the data being POSTed."""
headers = headers or {}
self.curl_handle.setopt(pycurl.POST, 1)
if data:
upload_file = StringIO(data)
upload_file_length = len(data)
if not upload_file_length and hasattr(upload_file, 'fileno'):
upload_file_length = os.fstat(upload_file.fileno()).st_size
self.curl_handle.setopt(pycurl.READFUNCTION, upload_file.read)
headers['Content-Type'] = content_type
headers['Content-Length'] = upload_file_length
try:
result = self._common_perform(url, headers, **kwargs)
finally:
self.reset()
return result
def put_url(self, url, data=None, upload_file=None, upload_file_length=None,
content_type='application/x-www-form-urlencoded',
headers = None, **kwargs):
"""Perform an HTTP PUT using pycurl. See :meth:`post_url` and
:meth:`_common_perform` for further details."""
headers = headers or {}
self.curl_handle.setopt(pycurl.UPLOAD, 1)
if data:
upload_file = StringIO(data)
upload_file_length = len(data)
if not upload_file_length and hasattr(upload_file, 'fileno'):
upload_file_length = os.fstat(upload_file.fileno()).st_size
self.curl_handle.setopt(pycurl.READFUNCTION, upload_file.read)
headers['Content-Type'] = content_type
headers['Content-Length'] = upload_file_length
headers['Transfer-Encoding'] = ''
try:
result = self._common_perform(url, headers, **kwargs)
finally:
self.reset()
return result
def delete_url(self, url, headers = None, **kwargs):
"""Perform an HTTP DELETE using pycurl. See :meth:`_common_perform` for
further details."""
headers = headers or {}
self.curl_handle.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
try:
result = self._common_perform(url, headers, **kwargs)
finally:
self.reset()
return result
def reset(self):
"""Resets the CURL handle to its base state. Automatically called after
a HEAD, POST, PUT, or DELETE.
Will use the pycurl handle's ``reset()`` method if available. Otherwise
discards and replaces the pycurl handle."""
if hasattr(self.curl_handle, 'reset'):
self.curl_handle.reset()
else:
self.curl_handle = pycurl.Curl()
def cache_dir():
def fget(self):
return self._cache_dir
def fset(self, value):
self._cache_dir = os.path.abspath(value)
def fdel(self):
del self._cache_dir
doc = """Sets the directory to be used to store cache files. Whatever
value is provided will be run through os.path.abspath."""
return locals()
cache_dir = property(**cache_dir())
local = _threading.local()
def threadCURLSingleton():
"""Creates or returns a single :class:`FriendlyCURL` object per thread. You
will usually want to call this to obtain a :class:`FriendlyCURL` object."""
if not hasattr(local, 'fcurl'):
local.fcurl = FriendlyCURL()
return local.fcurl
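# Hedged usage sketch (the URL is illustrative; see FriendlyCURL._common_perform
# for the (response, body) return convention):
#
#     fcurl = threadCURLSingleton()
#     response, body = fcurl.get_url('http://example.com/resource')
#     if response['status'] == 200:
#         data = body.read()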
class CurlHTTPConnection(object):
"""A HTTPConncetion-style object that uses pycurl to actually do the work.
Intended for use with httplib2. To use, import friendly_curl and httplib2
and monkey-patch httplib2 as follows::
httplib2.HTTPConnectionWithTimeout = CurlHTTPConnection
httplib2.HTTPSConnectionWithTimeout = CurlHTTPSConnection"""
def __init__(self, host, port=None,
key_file=None, cert_file=None, strict=False,
timeout=None, proxy_info=None):
self.host = host
self.port = port
self.key_file = key_file
self.cert_file = cert_file
self.strict = strict
self.timeout = timeout
self.proxy_info = proxy_info
self.handle = None
self.scheme = 'http'
def request(self, method, uri, body=None, headers=None):
if not self.handle:
self.connect()
handle = self.fcurl.curl_handle
if headers is None:
headers = {}
if method == 'GET':
handle.setopt(pycurl.HTTPGET, 1)
elif method == 'HEAD':
handle.setopt(pycurl.NOBODY, 1)
elif method == 'POST':
handle.setopt(pycurl.POST, 1)
if body:
headers['Content-Length'] = len(body)
body_IO = StringIO(body)
handle.setopt(pycurl.READFUNCTION, body_IO.read)
elif method == 'PUT':
handle.setopt(pycurl.UPLOAD, 1)
if body:
headers['Content-Length'] = len(body)
body_IO = StringIO(body)
handle.setopt(pycurl.READFUNCTION, body_IO.read)
elif method == 'PATCH':
handle.setopt(pycurl.UPLOAD, 1)
handle.setopt(pycurl.CUSTOMREQUEST, 'PATCH')
if body:
headers['Transfer-Encoding'] = ''
headers['Content-Length'] = len(body)
body_IO = StringIO(body)
handle.setopt(pycurl.READFUNCTION, body_IO.read)
elif body is not None:
# Custom method and body provided, error.
raise Exception("body not supported with custom method %s." % method)
else:
# Custom method and no body provided, pretend to do a GET.
handle.setopt(pycurl.CUSTOMREQUEST, method)
if self.port:
netloc = '%s:%s' % (self.host, self.port)
else:
netloc = self.host
url = urlparse.urlunparse((self.scheme, netloc, uri, '', '', ''))
self.url = str(iri2uri(url))
handle.setopt(pycurl.URL, self.url)
if headers:
handle.setopt(pycurl.HTTPHEADER, ['%s: %s' % (header, str(value)) for
header, value in
headers.iteritems()])
handle.setopt(pycurl.SSL_VERIFYPEER, 0)
handle.setopt(pycurl.NOSIGNAL, 1)
if self.key_file:
handle.setopt(pycurl.SSLKEY, self.key_file)
if self.cert_file:
handle.setopt(pycurl.SSLCERT, self.cert_file)
if self.timeout:
handle.setopt(pycurl.TIMEOUT, self.timeout)
# Proxy not supported yet.
def getresponse(self):
handle = self.fcurl.curl_handle
body = StringIO()
handle.setopt(pycurl.WRITEFUNCTION, body.write)
headers = StringIO()
handle.setopt(pycurl.HEADERFUNCTION, headers.write)
handle.perform()
self.fcurl.reset()
return CurlHTTPResponse(body, headers)
def set_debuglevel(self, level):
pass
def connect(self):
self.fcurl = threadCURLSingleton()
self.fcurl.reset()
def close(self):
"""Also doesn't actually do anything."""
self.fcurl = None
def putrequest(self, request, selector, skip_host, skip_accept_encoding):
raise NotImplementedError()
def putheader(self, header, argument, **kwargs):
raise NotImplementedError()
def endheaders(self):
raise NotImplementedError()
def send(self, data):
raise NotImplementedError()
class CurlHTTPSConnection(CurlHTTPConnection):
"""Like :class:`CurlHTTPConnection`, but uses https rather than plain http.
As with :class:`CurlHTTPConnection`, you probably don't want to use this
directly."""
def __init__(self, host, port=None,
key_file=None, cert_file=None, strict=False,
timeout=None, proxy_info=None):
super(CurlHTTPSConnection, self).__init__(host, port, key_file,
cert_file, strict, timeout,
proxy_info)
self.scheme = 'https'
class CurlHTTPResponse(httplib.HTTPResponse):
"""Used by :class:`CurlHTTPConnection` and :class:`CurlHTTPSConnection` to
return the HTTP response."""
def __init__(self, body, headers):
self.body = body
self.body.seek(0)
headers.seek(0)
status_line = headers.readline()
(http_version, sep, status_line) = status_line.partition(' ')
(status, sep, reason) = status_line.partition(' ')
self.version = int(''.join(ch for ch in http_version if ch.isdigit()))
self.status = int(status)
self.reason = reason.strip()
self.msg = mimetools.Message(headers)
def read(self, amt=-1):
"""Read data from the body of the HTTP response."""
return self.body.read(amt)
def getheader(self, name, default=None):
"""Get a header from the HTTP response.
:param default: The value to return if the header is not present.\
Defaults to ``None``."""
value = self.msg.get(name)
if value is None:
return default
return value
def getheaders(self):
"""Get a dictionary of all HTTP response headers."""
return [(header, self.msg.get(header)) for header in self.msg]
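# Hedged usage sketch (not part of the original module): this mirrors the
# monkey-patching described in the CurlHTTPConnection docstring. It assumes
# httplib2 is installed and importable; the URL is only a placeholder.
if __name__ == '__main__':
    import httplib2
    httplib2.HTTPConnectionWithTimeout = CurlHTTPConnection
    httplib2.HTTPSConnectionWithTimeout = CurlHTTPSConnection
    # httplib2 now builds its connections on top of pycurl via the classes above.
    http = httplib2.Http()
    response, content = http.request('http://example.com/', 'GET')
    print response.status, len(content)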
|
|
#!/usr/bin/env python3
# dividedramsey.py
# Glenn G. Chappell
# Date: 26 Aug 2016
# Requires Python 3.
"""Compute k-divided Ramsey numbers & related extremal graphs.
Command-line usage: dividedramsey.py [OPTIONS] k a b
Compute and print the generalized Ramsey number R*_k(a, b), along with
extremal graphs, using "DOT language". If not in quiet mode (see below),
also print the number of counterexample graphs of each order, up to
isomorphism, as they are computed. By a "counterexample graph" we mean a
graph G for which G has no a-vertex k-divided set, and the complement of
G has no b-vertex k-divided set.
OPTIONS:
-q, --quiet Quiet mode; do not print info on counterexample graphs.
The following options perform special operations; if they are given,
then arguments k, a, b are ignored and may be omitted.
-h, --help Print this usage message.
--test Run module tests (uses Python doctests), non-verbose mode.
--Test Run module tests, verbose mode.
To call from a Python 3 program, first do
import dividedramsey
To obtain a Ramsey number and related extremal graphs, do
value, extremal_list = dividedramsey.find_extremals(k, a, b)
with k, a, b set appropriately. Alternatively do
dividedramsey.print_extremals(k, a, b)
to print information to the standard output.
This software was written as a companion to the paper "On subgraphs
without large components" by Glenn G. Chappell and John Gimbel. See that
paper for mathematical background and related results.
"""
import isograph # for dot_str, isomorphic
import genramsey # for extremals
import sys # for argv, exit, stderr
import getopt # for error, getopt
# ----------------------------------------------------------------------
# Checking whether a set is k-divided
# ----------------------------------------------------------------------
def make_k_divided_func(k):
"""Return func f(g,s) -> True if s is k-divided in g.
Given positive integer k, returns a function f taking a graph g
and a subset of the vertex set of g, and returning bool. The
returned function returns True if s is k-divided in g.
The returned function is an induced-hereditary predicate, as the
term is used in genramsey.py.
Arguments:
k -- positive int; the "k" in k-divided
See isograph.py for our graph representation.
>>> f1 = make_k_divided_func(1)
>>> f2 = make_k_divided_func(2)
>>> f3 = make_k_divided_func(3)
>>> f4 = make_k_divided_func(4)
>>> f5 = make_k_divided_func(5)
>>> g = [ [1], [0,2], [1,3], [2,4], [3] ]
>>> s = [0,1,2,3,4]
>>> f1(g, s)
False
>>> f2(g, s)
False
>>> f3(g, s)
False
>>> f4(g, s)
False
>>> f5(g, s)
True
>>> s = [0,1,3,4]
>>> f1(g, s)
False
>>> f2(g, s)
True
>>> f3(g, s)
True
>>> s = [0,2,4]
>>> f1(g, s)
True
>>> f2(g, s)
True
>>> f3(g, s)
True
"""
def is_k_divided(g, s):
# k >= 1
pushed = [False] * len(g)
for v in s:
if pushed[v]: continue
compsize = 1
stack = [v]
pushed[v] = True
while stack:
x = stack.pop()
for y in s:
if pushed[y] or y not in g[x]: continue
if compsize >= k:
return False
compsize += 1
stack.append(y)
pushed[y] = True
return True
assert k >= 1
return is_k_divided
def make_k_divided_compl_func(k):
"""Return func f(g,s) -> True if s is k-divided in complement of g.
Given positive integer k, returns a function f taking a graph g
and a subset of the vertex set of g, and returning bool. The
returned function returns True if s is k-divided in the complement
of g.
The returned function is an induced-hereditary predicate, as the
term is used in genramsey.py.
Arguments:
k -- positive int; the "k" in k-divided
See isograph.py for our graph representation.
>>> f1 = make_k_divided_compl_func(1)
>>> f2 = make_k_divided_compl_func(2)
>>> f3 = make_k_divided_compl_func(3)
>>> f4 = make_k_divided_compl_func(4)
>>> f5 = make_k_divided_compl_func(5)
>>> g = [ [2,3,4], [3,4], [0,4], [0,1], [0,1,2] ]
>>> s = [0,1,2,3,4]
>>> f1(g, s)
False
>>> f2(g, s)
False
>>> f3(g, s)
False
>>> f4(g, s)
False
>>> f5(g, s)
True
>>> s = [0,1,3,4]
>>> f1(g, s)
False
>>> f2(g, s)
True
>>> f3(g, s)
True
>>> s = [0,2,4]
>>> f1(g, s)
True
>>> f2(g, s)
True
>>> f3(g, s)
True
"""
def is_k_divided_compl(g, s):
# k >= 1
pushed = [False] * len(g)
for v in s:
if pushed[v]: continue
compsize = 1
stack = [v]
pushed[v] = True
while stack:
x = stack.pop()
for y in s:
if pushed[y] or y in g[x]: continue
# Note: no check for y == x, as pushed[x] is True
if compsize >= k:
return False
compsize += 1
stack.append(y)
pushed[y] = True
return True
assert k >= 1
return is_k_divided_compl
# ----------------------------------------------------------------------
# Finding k-Divided Ramsey Numbers & Extremal Graphs
# ----------------------------------------------------------------------
def find_extremals(k, a, b, printflag=None):
"""Return R*_k(a,b), list of extremal graphs.
If printflag is True, prints, one on each line, pairs of the form
u v, where u is an integer from 0 to R*_k(a, b), and v is the number
of counterexample graphs of order u.
Arguments:
k -- positive int; the "k" in R*_k(a,b)
a -- nonnegative int; the "a" in R*_k(a,b)
b -- nonnegative int; the "b" in R*_k(a,b)
printflag -- optional bool: whether to print ongoing messages
Default is False.
See isograph.py for our graph representation.
>>> n, gs = find_extremals(1, 3, 3)
>>> n
6
>>> len(gs)
1
>>> c5 = [[1,4],[0,2],[1,3],[2,4],[3,0]]
>>> isograph.isomorphic(c5, gs[0])
True
>>> n, gs = find_extremals(1, 3, 3, printflag=True)
Order & number of counterexample graphs:
0 1
1 1
2 2
3 2
4 3
5 1
6 0
"""
assert k >= 1
assert a >= 0
assert b >= 0
f1 = make_k_divided_func(k)
f2 = make_k_divided_compl_func(k)
return genramsey.extremals(f1, f2, a, b, printflag)
def print_extremals(k, a, b, printflag=None):
"""Print R*_k(a,b) + extremal graphs in DOT language.
If printflag is True, prints, one on each line, pairs of the form
u v, where u is an integer from 0 to R*_k(a, b), and v is the number
of counterexample graphs of order u.
Arguments:
k -- positive int; the "k" in R*_k(a,b)
a -- nonnegative int; the "a" in R*_k(a,b)
b -- nonnegative int; the "b" in R*_k(a,b)
printflag -- optional bool: whether to print ongoing messages
Default is False.
See isograph.py for our graph representation.
>>> print_extremals(1, 2, 2)
Finding R*_1(2,2)
<BLANKLINE>
1 extremal graph(s):
<BLANKLINE>
graph rs1_2_2e1 {
1;
}
<BLANKLINE>
R*_1(2,2) = 2
1 extremal graph(s)
>>> print_extremals(1, 2, 2, printflag=True)
Finding R*_1(2,2)
<BLANKLINE>
Order & number of counterexample graphs:
0 1
1 1
2 0
<BLANKLINE>
1 extremal graph(s):
<BLANKLINE>
graph rs1_2_2e1 {
1;
}
<BLANKLINE>
R*_1(2,2) = 2
1 extremal graph(s)
"""
assert k >= 1
assert a >= 0
assert b >= 0
print("Finding R*_"+str(k)+"("+str(a)+","+str(b)+")")
print()
n, gs = find_extremals(k, a, b, printflag)
if printflag:
print()
print(len(gs), "extremal graph(s):")
print()
graphbasename = "rs"+str(k)+"_"+str(a)+"_"+str(b)+"e"
gcount = 0
for g in gs:
gcount += 1
graphname = graphbasename + str(gcount)
print(isograph.dot_str(g, graphname))
print()
print("R*_"+str(k)+"("+str(a)+","+str(b)+") = "+str(n))
print(len(gs), "extremal graph(s)")
# ----------------------------------------------------------------------
# Main program
# ----------------------------------------------------------------------
class UsageError(Exception):
"""Exception class for command-line usage errors.
>>> isinstance(UsageError(""), Exception)
True
>>> UsageError("abc").msg
'abc'
"""
def __init__(self, msg):
"""Create UsageError object with the given message."""
self.msg = msg
def main(argv=None):
"""Print R*_k(a,b) & extremal graphs, based on command-line options.
Argument argv is an optional list or tuple of strings, in the format
of sys.argv (which is its default value).
If command-line arguments k, a, b are passed, find R*_k(a,b) and all
extremal graphs (up to isomorphism) and print these, along with
explanatory output. Graphs are printed in DOT language.
Return zero on no error, nonzero on error.
The code in this function does command-line option processing and
error checking. Function print_extremals is called to do the actual
computation.
"""
if argv is None:
argv = sys.argv
printcounterexamples = True
try:
try:
optlist, args = getopt.getopt(argv[1:], "hq",
["help", "quiet", "test", "Test"])
except getopt.error as msg:
raise UsageError(msg)
for o, a in optlist:
if o in ["-h", "--help"]:
print(__doc__, end="") # Usage message
return 0
elif o in ["-q", "--quiet"]:
printcounterexamples = False
elif o == "--test" or o == "--Test":
import doctest # for testmod
verbose = (o == "--Test")
if verbose:
print("Running doctests (verbose mode)")
else:
print("Running doctests")
doctest.testmod(verbose=verbose)
return 0
else:
assert False, "unhandled option"
if len(args) != 3:
raise UsageError("Must have exactly 3 arguments")
try:
k = int(args[0])
a = int(args[1])
b = int(args[2])
except:
raise UsageError("Arguments must be integers")
except UsageError as err:
print(argv[0]+":", err.msg, file=sys.stderr)
print("For help use --help", file=sys.stderr)
return 2
print_extremals(k, a, b, printflag=printcounterexamples)
return 0
# Execute main() if running as program, not if imported as module
if __name__ == "__main__":
sys.exit(main())
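# Illustrative invocations (added for reference; they restate what the module
# docstring and the doctests above already establish):
#
#     python3 dividedramsey.py 1 3 3
#
# prints R*_1(3,3) = 6 together with the single extremal graph (a 5-cycle) in
# DOT format. The same result is available programmatically:
#
#     import dividedramsey
#     value, extremal_list = dividedramsey.find_extremals(1, 3, 3)
#     # value == 6, len(extremal_list) == 1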
|
|
from Lexeme import Lexeme
class Node():
Error = []
def __init__(self, lx):
self.index = 0
self.lexemes = lx
self.nextLexeme = self.lexemes[0]
self.prevLexeme = None
return
def parseError(self, err):
self.Error.append(err)
return
def lookahead(self):
self.index += 1
if self.index < len(self.lexemes):
self.prevLexeme = self.lexemes[self.index-1]
self.nextLexeme = self.lexemes[self.index]
return
def backtrack(self):
self.index -= 1
if self.index > 0 and self.index < len(self.lexemes):
self.nextLexeme = self.lexemes[self.index]
return
def panic(self):
while self.nextLexeme.label not in [')', '}', ';']:
self.lookahead()
def parse(self):
self.codeblock()
return
def codeblock(self):
#<CODEBLOCK>:= <STATEMENT>
self.statement()
#<CODEBLOCK>:= <STATEMENT><CODEBLOCK>
if self.index < len(self.lexemes):
self.lookahead()
self.codeblock()
return
def statement(self):
#<STATEMENT>:= <EXPRESSION>|<VARDEC>|<ARRAYDEC>|<ASSIGN>|<IF-THEN>|<FOR-LOOP>|<WHILE-LOOP>|<DO-WHILE-LOOP>|<FUNCTIONDEC>|<FUNCTIONCALL>|<OUTPUT>|<INPUT>
if self.nextLexeme.label == 'new':
self.lookahead()
self.varDec()
elif self.nextLexeme.label == 'print':
self.lookahead()
self.output()
elif self.nextLexeme.label == 'scan':
self.lookahead()
self.input()
elif self.nextLexeme.label == 'if':
self.lookahead()
self.ifcond()
self.lookahead()
if self.nextLexeme.label == 'else':
self.lookahead()
self.elsecond()
else:
self.backtrack()
elif self.nextLexeme.label == 'for':
self.lookahead()
self.forLoop()
elif self.nextLexeme.label == 'while':
self.lookahead()
self.whileLoop()
elif self.nextLexeme.label == 'do':
self.lookahead()
self.doLoop()
elif self.nextLexeme.label == 'func':
self.lookahead()
self.funcDec()
elif self.nextLexeme.label == 'call':
self.lookahead()
self.funcCall()
elif self.expression():
self.lookahead()
return
def varDec(self):
#<VARDEC>:= new <VARIDENT><LINE-DELIMITER>
#<ARRAYDEC>:= new <VARIDENT><ARRAY-SIZE><LINE-DELIMITER>
save = self.index
if self.nextLexeme.label == 'Variable Identifier':
self.lookahead()
if self.nextLexeme.label == '[':
self.lookahead()
if not self.operation():
if not self.operand():
self.parseError('Expected argument for array declaration at line '+str(self.prevLexeme.lineNumber))
self.lookahead()
if self.nextLexeme.label == ']':
self.lookahead()
else:
self.parseError('Expected \']\' for array declaration at line '+str(self.prevLexeme.lineNumber))
if self.nextLexeme.label != ';':
self.index = save+1
self.backtrack()
if not self.assign():
self.parseError('Expected \';\' at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Expected Variable Identifier at line '+str(self.prevLexeme.lineNumber))
return True
def assign(self):
if self.nextLexeme.label == 'Variable Identifier':
self.lookahead()
if self.nextLexeme.label == '=':
self.lookahead()
if not self.operation():
if not self.operand():
self.parseError('Expected argument for array declaration at line '+str(self.prevLexeme.lineNumber))
self.lookahead()
else:
self.lookahead()
return True
else:
return True
return False
def splice(self):
if self.nextLexeme.label == '(':
self.lookahead()
if self.nextLexeme.label in ['String Literal', 'Variable Identifier']:
self.lookahead()
if self.nextLexeme.label == ',':
self.lookahead()
if not self.operation():
if not self.operand():
self.parseError('Expected \'int\' for splice function at line '+str(self.prevLexeme.lineNumber))
self.lookahead()
if self.nextLexeme.label == ',':
self.lookahead()
if not self.operation():
if not self.operand():
self.parseError('Expected \'int\' for splice function at line '+str(self.prevLexeme.lineNumber))
self.panic()
else:
self.lookahead()
if self.nextLexeme.label == ')':
self.lookahead()
return True
else:
self.parseError('Expected \')\' at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Invalid argument count for splice function at line '+str(self.prevLexeme.lineNumber))
self.panic()
print("wow "+self.nextLexeme.label)
else:
self.parseError('Invalid argument count for splice function at line '+str(self.prevLexeme.lineNumber))
self.panic()
else:
self.parseError('Expected String for splice function at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Expected \'(\' for splice function at line '+str(self.prevLexeme.lineNumber))
return False
def concat(self):
if self.nextLexeme.label == '(':
self.lookahead()
if self.nextLexeme.label in ['String Literal', 'Variable Identifier']:
self.lookahead()
if self.nextLexeme.label == ',':
self.lookahead()
if self.nextLexeme.label in ['String Literal', 'Variable Identifier']:
self.lookahead()
else:
self.parseError('Invalid argument 2 for concat function at line '+str(self.prevLexeme.lineNumber))
self.panic()
if self.nextLexeme.label == ')':
self.lookahead()
return True
else:
self.parseError('Expected \')\' at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Invalid argument count for concat function at line '+str(self.prevLexeme.lineNumber))
self.panic()
else:
self.parseError('Invalid argument 1 for concat function at line '+str(self.prevLexeme.lineNumber))
self.panic()
else:
self.parseError('Expected \'(\' for concat function at line '+str(self.prevLexeme.lineNumber))
return False
def length(self):
if self.nextLexeme.label == '(':
self.lookahead()
if self.nextLexeme.label in ['String Literal', 'Variable Identifier']:
self.lookahead()
else:
self.parseError('Invalid argument for length function at line '+str(self.prevLexeme.lineNumber))
self.panic()
if self.nextLexeme.label == ')':
self.lookahead()
return True
else:
self.parseError('Expected \')\' at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Expected \'(\' for length function at line '+str(self.prevLexeme.lineNumber))
return False
def output(self):
if self.nextLexeme.label == '(':
self.lookahead()
if self.nextLexeme.label in ['String Literal', 'Variable Identifier']:
self.lookahead()
elif self.nextLexeme.label == 'concat':
self.lookahead()
self.concat()
elif self.nextLexeme.label == 'splice':
self.lookahead()
self.splice()
else:
self.parseError('Invalid argument for print function at line '+str(self.prevLexeme.lineNumber))
self.panic()
if self.nextLexeme.label == ')':
self.lookahead()
else:
self.parseError('Expected \')\' at line '+str(self.prevLexeme.lineNumber))
if self.nextLexeme.label != ';':
self.parseError('Expected \';\' at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
else:
self.parseError('Expected \'(\' for print function at line '+str(self.prevLexeme.lineNumber))
return
def input(self):
if self.nextLexeme.label == '(':
self.lookahead()
if self.nextLexeme.label == 'Variable Identifier':
self.lookahead()
if self.nextLexeme.label == ',':
self.lookahead()
if self.nextLexeme.label in ['String Literal', 'Variable Identifier']:
self.lookahead()
if self.nextLexeme.label == ')':
self.lookahead()
else:
self.parseError('Expected \')\' at line '+str(self.prevLexeme.lineNumber))
if self.nextLexeme.label != ';':
self.parseError('Expected \';\' at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
else:
self.parseError('Expected string argument for scan function at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Invalid argument count for scan function at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Expected variable argument for scan function at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Expected \'(\' for scan function at line '+str(self.prevLexeme.lineNumber))
return
return
def contentBlock(self, name, index):
if self.nextLexeme.label == '{':
self.lookahead()
while self.nextLexeme.label != '}':
if self.index >= len(self.lexemes):
self.parseError('Expected \'}\' for '+name+' at line '+str(index))
return False
self.statement()
self.lookahead()
else:
self.parseError('Expected \'{\' for '+name+' at line '+str(index))
return False
return True
def ifcond(self):
ifIndex = self.prevLexeme.lineNumber
if self.nextLexeme.label == '(':
self.lookahead()
if not self.booleanOp():
self.parseError('Expected boolean condition for if-condition at line '+str(ifIndex))
self.panic()
if self.nextLexeme.label == ')':
self.lookahead()
else:
self.parseError('Expected \')\' at line '+str(self.nextLexeme.lineNumber))
if not self.contentBlock('if-condition', ifIndex):
return
else:
self.parseError('Expected \'(\' for if-condition at line '+str(ifIndex))
self.lookahead()
if self.nextLexeme.label == 'elsif':
self.lookahead()
self.ifcond()
else:
self.backtrack()
return
def elsecond(self):
elseIndex = self.prevLexeme.lineNumber
if self.nextLexeme.label == '{':
self.lookahead()
while self.nextLexeme.label != '}':
if self.index == len(self.lexemes):
self.parseError('Expected \'}\' for else-condition at line '+str(elseIndex))
return
self.statement()
self.lookahead()
else:
self.parseError('Expected \'{\' for else-condition at line '+str(elseIndex))
return
def forLoop(self):
forIndex = self.prevLexeme.lineNumber
if self.nextLexeme.label == '(':
self.lookahead()
if not self.assign():
self.parseError('Expected expression for argument 1 in for condition at line '+str(forIndex))
self.panic()
if self.nextLexeme.label == ';':
self.lookahead()
if not self.booleanOp():
self.parseError('Expected boolean for argument 2 in for condition at line '+str(forIndex))
self.panic()
if self.nextLexeme.label == ';':
self.lookahead()
if not self.assign():
self.parseError('Expected expression for argument 3 in for condition at line '+str(forIndex))
self.panic()
else:
self.parseError('Expected \';\' after argument 2 in for condition at line '+str(forIndex))
else:
self.parseError('Expected \';\' after argument 1 in for condition at line '+str(forIndex))
if self.nextLexeme.label == ')':
self.lookahead()
else:
self.parseError('Expected \')\' at line '+str(self.nextLexeme.lineNumber))
if not self.contentBlock('for-loop', forIndex):
return
else:
self.parseError('Expected \'(\' for for-loop at line '+str(forIndex))
return
def whileLoop(self):
whileIndex = self.prevLexeme.lineNumber
if self.nextLexeme.label == '(':
self.lookahead()
if not self.booleanOp():
self.parseError('Expected boolean condition for while-loop at line '+str(whileIndex))
self.panic()
if self.nextLexeme.label == ')':
self.lookahead()
else:
self.parseError('Expected \')\' at line '+str(self.nextLexeme.lineNumber))
if not self.contentBlock('while-loop', whileIndex):
return
else:
self.parseError('Expected \'(\' for while-loop at line '+str(whileIndex))
return
def doLoop(self):
doIndex = self.prevLexeme.lineNumber
if self.contentBlock('do-while-loop', doIndex):
self.lookahead()
if self.nextLexeme.label == 'while':
self.lookahead()
if self.nextLexeme.label == '(':
self.lookahead()
if not self.booleanOp():
self.parseError('Expected boolean condition for do-while-loop at line '+str(doIndex))
self.panic()
if self.nextLexeme.label == ')':
self.lookahead()
else:
self.parseError('Expected \')\' at line '+str(self.nextLexeme.lineNumber))
if self.nextLexeme.label == ";":
self.lookahead()
else:
self.parseError('Expected \';\' at line '+str(self.prevLexeme.lineNumber))
else:
self.parseError('Expected \'(\' for do-while-loop at line '+str(doIndex))
else:
self.parseError('Expected while part for do-while-loop at line '+str(doIndex))
return
def funcDec(self):
funcIndex = self.prevLexeme.lineNumber
if self.nextLexeme.label == 'Variable Identifier':
self.lookahead()
if self.nextLexeme.label == '(':
self.lookahead()
while self.nextLexeme.label != ')':
if self.nextLexeme.label == 'Variable Identifier':
self.lookahead()
if self.nextLexeme.label == ',':
self.lookahead()
else:
self.parseError('Invalid argument for function declaration at line '+str(funcIndex))
self.panic()
self.lookahead()
self.contentBlock('function-declaration', funcIndex)
else:
self.parseError('Expected \'(\' for function declaration at line '+str(funcIndex))
else:
self.parseError('Expected function name for function declaration at line '+str(funcIndex))
return
def funcCall(self):
callIndex = self.prevLexeme.lineNumber
if self.nextLexeme.label == 'Variable Identifier':
self.lookahead()
if self.nextLexeme.label == '(':
self.lookahead()
while self.nextLexeme.label != ')':
if self.operation():
if self.nextLexeme.label == ',':
self.lookahead()
elif self.operand():
self.lookahead()
if self.nextLexeme.label == ',':
self.lookahead()
else:
self.parseError('Invalid argument for function call at line '+str(callIndex))
while self.nextLexeme.label not in [',', ')']:
self.lookahead()
self.lookahead()
if self.nextLexeme.label != ';':
self.parseError('Expected \';\' at line '+str(callIndex))
self.backtrack()
else:
self.parseError('Expected \'(\' for function call at line '+str(callIndex))
return
def expression(self):
save = self.index
if self.operation():
if self.nextLexeme.label != ';':
self.parseError('Expected \';\' at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
else:
return True
self.index = save+1
self.backtrack()
if self.booleanOp():
if self.nextLexeme.label != ';':
self.parseError('Expected \';\' at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
else:
return True
self.index = save+1
self.backtrack()
if self.nextLexeme.label == 'splice':
self.lookahead()
if self.splice():
return True
self.index = save+1
self.backtrack()
if self.nextLexeme.label == 'concat':
self.lookahead()
if self.concat():
return True
self.index = save+1
self.backtrack()
if self.nextLexeme.label == 'len':
self.lookahead()
if self.length():
return True
return False
def operation(self):
if self.operand():
self.lookahead()
if self.nextLexeme.label in ['+', '-', '*', '/', '%']:
self.lookahead()
if not self.operation():
if self.operand():
self.lookahead()
return True
else:
self.parseError('Invalid operation syntax at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
else:
return True
else:
self.backtrack()
if self.nextLexeme.label == '(':
self.lookahead()
if self.operation():
if self.nextLexeme.label == ')':
self.lookahead()
return True
else:
self.parseError('Expected \')\' at line '+str(self.nextLexeme.lineNumber))
return False
def operand(self):
if self.nextLexeme.label in ['Integer Literal', 'Float Literal', 'String Literal', 'Variable Identifier', '(']:
return True
return False
def booleanOp(self):
save = self.index
if self.boolean():
self.lookahead()
if self.nextLexeme.label in ['and', 'or']:
self.lookahead()
if not self.booleanOp():
if self.boolean():
self.lookahead()
return True
else:
self.parseError('Invalid operation syntax at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
else:
return True
else:
self.backtrack()
if self.nextLexeme.label == '(':
self.lookahead()
if self.booleanOp():
if self.nextLexeme.label == ')':
self.lookahead()
return True
else:
self.parseError('Expected \')\' at line '+str(self.nextLexeme.lineNumber))
self.index = save-1
self.lookahead()
if self.boolean():
self.lookahead()
return True
return False
def boolean(self):
if self.nextLexeme.label in ['true', 'false', '(']:
return True
elif self.operand():
self.lookahead()
if self.nextLexeme.label in ['==', '>', '<', '>=', '<=']:
self.lookahead()
if not self.operation():
if self.operand():
return True
else:
self.parseError('Invalid boolean syntax at line '+str(self.prevLexeme.lineNumber))
self.backtrack()
return False
def parser(tokens):
global PTree
PTree = Node(tokens)
PTree.parse()
for s in PTree.Error:
print("Syntax Error: "+s)
return
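# Hedged usage sketch (not part of the original file). The parser consumes a
# list of Lexeme objects exposing .label and .lineNumber, as used throughout
# Node above; how those objects are produced (the lexer module and tokenize()
# below) is an assumption, not something defined here.
#
#     from Lexer import tokenize              # hypothetical lexer
#     tokens = tokenize("new x; x = 1 + 2;")  # -> list of Lexeme objects
#     parser(tokens)                          # prints any syntax errors found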
|
|
import collections
import sys
import config
import time, datetime, pigpio
from datetime import timedelta
import argparse
import ephem
# Debug statements to help diagnose issues when Python won't run this script
# print(sys.executable)
# import os
# print(os.getcwd())
# print(sys.path)
# import platform
# print(platform.python_version())
class TerrariumLights:
demo_time = None # Used instead of current time when in Demo mode
LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# Temp and Humidity
temperature = None
humidity = None
# Date/Time of Sunrise/Sunset
previous_sunrise = None
sunrise = None
sunset = None
def __init__(self, test_mode=False):
self.test_mode = test_mode
# Parse arguments
parser = argparse.ArgumentParser(description='Service to control LED Lighting in a Terrarium')
parser.add_argument("r", nargs='?', default=0)
parser.add_argument("g", nargs='?', default=0)
parser.add_argument("b", nargs='?', default=0)
parser.add_argument("w", nargs='?', default=0)
args = parser.parse_args()
print "Starting Auto LED Controller"
self.start_time = time.time()
print "Initializing connection to pigpio daemon"
# pigpio daemon is required for accurate PWM timing
if not self.test_mode:
self.pi = pigpio.pi()
if any(x > 0 for x in (args.r, args.g, args.b, args.w)):
self._set_rgb_led(r=args.r, g=args.g, b=args.b)
self._set_white_led(w=args.w)
time.sleep(0.5)
self.pi.stop()
# time.sleep(3)
sys.exit(0)
print "Initializing Temp/Humidity sensor"
# Sensor should be set to Adafruit_DHT.DHT11,
# Adafruit_DHT.DHT22, or Adafruit_DHT.AM2302.
# self.s = config.sensor
if config.DEMO_MODE:
print "Running in Demo Mode"
self.demo_time = datetime.datetime.now()
self._get_sunrise_sunset(self.demo_time)
# Jump demo time back to the start of the sunrise colour ramp
self.demo_time = self.sunrise - datetime.timedelta(0, config.sunrise_colour_map.keys()[0])
# try:
while True and not self.test_mode:
if config.DEMO_MODE:
self._run(self.demo_time)
self.demo_time = self.demo_time + datetime.timedelta(0, config.DEMO_SPEED)
time.sleep(1)
else:
self._run(datetime.datetime.now())
time.sleep(60)
# except Exception as e:
# self.pi.stop()
# print 'Error: ', e.message
def _run(self, now):
print 'Running: ', self._timestamp(now)
self._get_sunrise_sunset(now)
# if not config.DEMO_MODE:
# if self._get_temp_and_humidity():
# if config.ENABLE_DB:
# self._save_to_db(self.temperature, self.humidity)
# #self._update_graphs()
self._update_lighting(now)
print ''
sys.stdout.flush()
def _get_sunrise_sunset(self, now):
if None in (self.sunrise, self.sunset) \
or now > self.sunrise \
or now > self.sunset:
print "Calculating sunrise and sunset times"
o = ephem.Observer()
# o.horizon = -6 #http://rhodesmill.org/pyephem/rise-set.html#naval-observatory-risings-and-settings
if config.DEMO_MODE:
o.date = now - (datetime.datetime.now() - datetime.datetime.utcnow()) # ephem uses UTC Time
o.lat = config.latitude
o.long = config.longitude
s = ephem.Sun()
s.compute()
try:
self.previous_sunrise = ephem.localtime(o.previous_rising(s))
except (ephem.AlwaysUpError, ephem.NeverUpError):
print 'previous_rising unavailable (AlwaysUpError/NeverUpError); skipping'
pass
self.sunrise = ephem.localtime(o.next_rising(s))
self.sunrise += timedelta(hours=1)
# Terrarium was bright too early: the computed astronomical sunrise can run
# ahead of when sunrise appears to happen on the ground, so push it back an hour.
self.sunset = ephem.localtime(o.next_setting(s))
print "Date: ", ephem.localtime(o.date)
print "Latitude: %s" % o.lat
print "Longitude: %s" % o.long
print "Previous Sunrise: %s" % self._timestamp(self.previous_sunrise)
print "Next Sunrise: %s" % self._timestamp(self.sunrise)
print "Next Sunset: %s" % self._timestamp(self.sunset)
def _update_lighting(self, effective_time):
is_light = self.sunrise > self.sunset
if is_light:
effective_colour = config.day_colour
current_phase, next_phase = 'day', 'sunset'
delta = (self.sunset - effective_time).total_seconds()
colour_map = config.sunset_colour_map
else:
effective_colour = config.night_colour
current_phase, next_phase = 'night', 'sunrise'
delta = (self.sunrise - effective_time).total_seconds()
colour_map = config.sunrise_colour_map
print "Currently ", current_phase
start_delta, start_colour = self.get_start_colour_from_map(delta, colour_map)
if start_delta is not None:
blending_required = True
effective_colour = start_colour
end_delta, end_colour = self.get_end_colour_from_map(delta, colour_map)
else:
blending_required = False
end_delta = None
end_colour = None
print "%s in %d seconds" % (next_phase, delta)
if blending_required:
blend_percentage = self.get_blend_percentage(delta, start_delta, end_delta)
effective_colour = self.blend_colours(start_colour, end_colour, blend_percentage)
print "Blend Percentage: ", blend_percentage
print "Start Colour: ", start_colour
print "End Colour: ", end_colour
print "Current Delta: ", delta
print "Start Delta: ", start_delta
print "End Delta: ", end_delta
print "Effective Colour: ", effective_colour
# Update LEDs
if (effective_colour[0] >= 0) and (effective_colour[1] >= 0) and (effective_colour[2] >= 0) and (
effective_colour[3] >= 0):
self._set_rgb_led(r=effective_colour[0], g=effective_colour[1], b=effective_colour[2])
self._set_white_led(w=effective_colour[3])
def get_start_colour_from_map(self, delta, colour_map):
delta_result, colour_result = None, None
sorted_colour_map = collections.OrderedDict(sorted(colour_map.items()))
for d, c in sorted_colour_map.iteritems():
if delta <= d:
delta_result, colour_result = d, c
return delta_result, colour_result
def get_end_colour_from_map(self, delta, colour_map):
sorted_colour_map = collections.OrderedDict(sorted(colour_map.items()))
for d, c in sorted_colour_map.iteritems():
if delta >= d:
return d, c
return None, None
def get_blend_percentage(self, delta, start_delta, end_delta):
diff = start_delta - end_delta
if diff == 0:
return 0
v = start_delta - delta
blended = (v / diff) * 100
if blended > 0:
return (v / diff) * 100
else:
return 0
def blend_colours(self, start_colour, end_colour, blend_percentage):
print "Blend Percentage: ", blend_percentage
blended_colour = [0, 0, 0, 0]
for i in range(4):
diff = end_colour[i] - start_colour[i]
print "diff: ", diff
if diff != 0:
test = diff / 100
# print "test: ", test
blend_shift = ((diff / 100) * blend_percentage)
# print "Blend Shift: ", blend_shift
blended_colour[i] = start_colour[i] + blend_shift
else:
blended_colour[i] = start_colour[i]
print "Blend Test: ", blended_colour
return blended_colour
def _set_rgb_led(self, r=0, g=0, b=0):
if not self.test_mode:
self.pi.set_PWM_dutycycle(config.r_channel, r)
self.pi.set_PWM_dutycycle(config.g_channel, g)
self.pi.set_PWM_dutycycle(config.b_channel, b)
def _set_white_led(self, w=0):
if not self.test_mode:
self.pi.set_PWM_dutycycle(config.w_channel, w)
def _timestamp(self, value=None):
if value == None:
value = datetime.datetime.now()
return datetime.datetime.strftime(value, self.LOG_TIME_FORMAT)
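# Hedged worked example of the colour-map lookups above (added for
# illustration; test_mode=True skips the pigpio connection). The toy colour map
# is an assumption; real keyframes live in config.sunrise_colour_map and
# config.sunset_colour_map.
#
#     lights = TerrariumLights(test_mode=True)
#     toy_map = {600: [0, 0, 0, 0], 0: [255, 128, 0, 64]}
#     lights.get_start_colour_from_map(300.0, toy_map)  # -> (600, [0, 0, 0, 0])
#     lights.get_end_colour_from_map(300.0, toy_map)    # -> (0, [255, 128, 0, 64])
#     lights.get_blend_percentage(300.0, 600, 0)        # -> 50.0
#
# blend_colours() then shifts each channel towards the end colour by that
# percentage (note the Python 2 integer division of diff / 100 above).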
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import os
import sys
from warnings import warn
from .version import version as __version__
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then the import is within a source
# dir. .astropy-root is a file distributed with the source, but it should not
# be installed.
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'https://docs.astropy.org/en/latest/'
else:
online_docs_root = f'https://docs.astropy.org/en/{__version__}/'
from . import config as _config # noqa: E402
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
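# Hedged usage note (added for illustration): these items follow the standard
# astropy ConfigNamespace pattern, so they can be read or overridden at
# runtime, e.g.
#
#     from astropy import conf
#     conf.use_color = False
#     with conf.set_temp('max_lines', 20):
#         ...  # pretty-printed output limited to 20 lines inside this block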
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState # noqa: E402
class base_constants_version(ScienceState):
"""
Base class for the real version-setters below
"""
_value = 'test'
_versions = dict(test='test')
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError(f'Must be one of {list(cls._versions.keys())}')
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if 'astropy.units' in sys.modules:
raise RuntimeError('astropy.units is already imported')
if 'astropy.constants' in sys.modules:
raise RuntimeError('astropy.constants is already imported')
return super().set(value)
class physical_constants(base_constants_version):
"""
The version of physical constants to use
"""
# Maintainers: update when new constants are added
_value = 'codata2018'
_versions = dict(codata2018='codata2018', codata2014='codata2014',
codata2010='codata2010', astropyconst40='codata2018',
astropyconst20='codata2014', astropyconst13='codata2010')
class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use
"""
# Maintainers: update when new constants are added
_value = 'iau2015'
_versions = dict(iau2015='iau2015', iau2012='iau2012',
astropyconst40='iau2015', astropyconst20='iau2015',
astropyconst13='iau2012')
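# Hedged usage sketch (added for illustration): selecting a constants version
# follows the ScienceState pattern defined above and, per set() above, must
# happen before astropy.units or astropy.constants is imported.
#
#     import astropy
#     astropy.physical_constants.set('codata2014')
#     astropy.astronomical_constants.set('iau2012')
#     from astropy import constants as const  # uses the selected versions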
# Create the test() function
from .tests.runner import TestRunner # noqa: E402
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
try:
from .utils import _compiler # noqa: F401
except ImportError:
if _is_astropy_source():
raise ImportError('You appear to be trying to import astropy from '
'within a source checkout or from an editable '
'installation without building the extension '
'modules first. Either run:\n\n'
' pip install -e .\n\nor\n\n'
' python setup.py build_ext --inplace\n\n'
'to make sure the extension modules are built ')
else:
# Outright broken installation, just raise standard error
raise
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
citation_file = os.path.join(os.path.dirname(__file__), 'CITATION')
with open(citation_file, 'r') as citation:
refs = citation.read().split('@ARTICLE')[1:]
if len(refs) == 0:
return ''
bibtexreference = f'@ARTICLE{refs[0]}'
return bibtexreference
__citation__ = __bibtex__ = _get_bibtex()
from .logger import _init_log, _teardown_log # noqa: E402, F401
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page # noqa: E402, F401
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
import webbrowser
from urllib.parse import urlencode
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = f"https://docs.astropy.org/en/{version}/search.html?{urlencode({'q': query})}"
webbrowser.open(url)
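# Example (restating the docstring above): astropy.online_help("units") opens
# the documentation search results for "units" in the default browser;
# requires network access.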
__dir_inc__ = ['__version__', '__githash__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf', 'physical_constants',
'astronomical_constants']
from types import ModuleType as __module_type__ # noqa: E402
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
# The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
CONF = config.CONF
class SharesNegativeTest(base.BaseSharesTest):
@classmethod
def resource_setup(cls):
super(SharesNegativeTest, cls).resource_setup()
cls.share = cls.create_share(
name='public_share',
description='public_share_desc',
is_public=True,
metadata={'key': 'value'}
)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_update_share_with_wrong_public_value(self):
self.assertRaises(lib_exc.BadRequest,
self.shares_client.update_share, self.share["id"],
is_public="truebar")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_try_delete_share_with_existing_snapshot(self):
# share can not be deleted while snapshot exists
# create share
share = self.create_share()
# create snapshot
self.create_snapshot_wait_for_active(share["id"])
# try delete share
self.assertRaises(lib_exc.Forbidden,
self.shares_client.delete_share, share["id"])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_create_share_from_snap_with_less_size(self):
# requires a minimum of 5 GB of available space
skip_msg = "Check disc space for this test"
try: # create share
size = CONF.share.share_size + 1
share = self.create_share(size=size, cleanup_in_class=False)
except share_exceptions.ShareBuildErrorException:
self.skip(skip_msg)
try: # create snapshot
snap = self.create_snapshot_wait_for_active(
share["id"], cleanup_in_class=False)
except share_exceptions.SnapshotBuildErrorException:
self.skip(skip_msg)
# try create share from snapshot with less size
self.assertRaises(lib_exc.BadRequest,
self.create_share,
snapshot_id=snap["id"],
cleanup_in_class=False)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@testtools.skipIf(not CONF.share.multitenancy_enabled,
"Only for multitenancy.")
def test_create_share_with_nonexistant_share_network(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.create_share,
share_network_id="wrong_sn_id")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@testtools.skipIf(not CONF.share.multitenancy_enabled,
"Only for multitenancy.")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_create_share_from_snap_with_different_share_network(self):
# create share
share = self.create_share(cleanup_in_class=False)
# get parent's share network
parent_share = self.shares_client.get_share(share["id"])
parent_sn = self.shares_client.get_share_network(
parent_share["share_network_id"])
# create a new share network that duplicates the parent share's network
new_duplicated_sn = self.create_share_network(
cleanup_in_class=False,
neutron_net_id=parent_sn["neutron_net_id"],
neutron_subnet_id=parent_sn["neutron_subnet_id"],
)
# create snapshot of parent share
snap = self.create_snapshot_wait_for_active(
share["id"], cleanup_in_class=False)
# try create share with snapshot using another share-network
# 400 bad request is expected
self.assertRaises(
lib_exc.BadRequest,
self.create_share,
cleanup_in_class=False,
share_network_id=new_duplicated_sn["id"],
snapshot_id=snap["id"],
)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_update_other_tenants_public_share(self):
isolated_client = self.get_client_with_isolated_creds(
type_of_creds='alt')
self.assertRaises(lib_exc.Forbidden, isolated_client.update_share,
self.share["id"], name="new_name")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_delete_other_tenants_public_share(self):
isolated_client = self.get_client_with_isolated_creds(
type_of_creds='alt')
self.assertRaises(lib_exc.Forbidden,
isolated_client.delete_share,
self.share['id'])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_set_metadata_of_other_tenants_public_share(self):
isolated_client = self.get_client_with_isolated_creds(
type_of_creds='alt')
self.assertRaises(lib_exc.Forbidden,
isolated_client.set_metadata,
self.share['id'],
{'key': 'value'})
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_update_metadata_of_other_tenants_public_share(self):
isolated_client = self.get_client_with_isolated_creds(
type_of_creds='alt')
self.assertRaises(lib_exc.Forbidden,
isolated_client.update_all_metadata,
self.share['id'],
{'key': 'value'})
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_delete_metadata_of_other_tenants_public_share(self):
isolated_client = self.get_client_with_isolated_creds(
type_of_creds='alt')
self.assertRaises(lib_exc.Forbidden,
isolated_client.delete_metadata,
self.share['id'],
'key')
class SharesAPIOnlyNegativeTest(base.BaseSharesTest):
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_unmanage_share_by_user(self):
self.assertRaises(lib_exc.Forbidden,
self.shares_client.unmanage_share,
'fake-id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_manage_share_by_user(self):
self.assertRaises(lib_exc.Forbidden,
self.shares_client.manage_share,
'fake-host', 'nfs', '/export/path',
'fake-type')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_list_by_share_server_by_user(self):
self.assertRaises(lib_exc.Forbidden,
self.shares_client.list_shares,
params={'share_server_id': 12345})
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_non_existent_az(self):
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.create_share,
availability_zone='fake_az')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_with_zero_size(self):
self.assertRaises(lib_exc.BadRequest,
self.shares_client.create_share, size=0)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_with_invalid_size(self):
self.assertRaises(lib_exc.BadRequest,
self.shares_client.create_share, size="#$%")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_with_out_passing_size(self):
self.assertRaises(lib_exc.BadRequest,
self.shares_client.create_share, size="")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_delete_snapshot_with_wrong_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_snapshot,
"wrong_share_id")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_create_snapshot_with_wrong_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.create_snapshot,
"wrong_share_id")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_with_invalid_protocol(self):
self.assertRaises(lib_exc.BadRequest,
self.shares_client.create_share,
share_protocol="nonexistent_protocol")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_with_wrong_public_value(self):
self.assertRaises(lib_exc.BadRequest,
self.shares_client.create_share, is_public='truebar')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_get_share_with_wrong_id(self):
self.assertRaises(lib_exc.NotFound, self.shares_client.get_share,
"wrong_share_id")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_get_share_without_passing_share_id(self):
# Should not be able to get share when empty ID is passed
self.assertRaises(lib_exc.NotFound,
self.shares_client.get_share, '')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_list_shares_nonadmin_with_nonexistent_share_server_filter(self):
# filtering by share server allowed only for admins by default
self.assertRaises(lib_exc.Forbidden,
self.shares_client.list_shares_with_detail,
{'share_server_id': 'fake_share_server_id'})
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_delete_share_with_wrong_id(self):
self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share,
"wrong_share_id")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_delete_share_without_passing_share_id(self):
# Should not be able to delete share when empty ID is passed
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_share, '')
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of device_utils.py (mostly DeviceUtils).
"""
# pylint: disable=C0321
# pylint: disable=W0212
# pylint: disable=W0613
import collections
import datetime
import logging
import os
import re
import sys
import unittest
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import device_signal
from pylib.device import adb_wrapper
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.device import intent
from pylib.sdk import split_select
from pylib.utils import mock_calls
# RunCommand from third_party/android_testrunner/run_command.py is mocked
# below, so its path needs to be in sys.path.
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class DeviceUtilsInitTest(unittest.TestCase):
def testInitWithStr(self):
serial_as_str = str('0123456789abcdef')
d = device_utils.DeviceUtils('0123456789abcdef')
self.assertEqual(serial_as_str, d.adb.GetDeviceSerial())
def testInitWithUnicode(self):
serial_as_unicode = unicode('fedcba9876543210')
d = device_utils.DeviceUtils(serial_as_unicode)
self.assertEqual(serial_as_unicode, d.adb.GetDeviceSerial())
def testInitWithAdbWrapper(self):
serial = '123456789abcdef0'
a = adb_wrapper.AdbWrapper(serial)
d = device_utils.DeviceUtils(a)
self.assertEqual(serial, d.adb.GetDeviceSerial())
def testInitWithAndroidCommands(self):
serial = '0fedcba987654321'
a = android_commands.AndroidCommands(device=serial)
d = device_utils.DeviceUtils(a)
self.assertEqual(serial, d.adb.GetDeviceSerial())
def testInitWithMissing_fails(self):
with self.assertRaises(ValueError):
device_utils.DeviceUtils(None)
with self.assertRaises(ValueError):
device_utils.DeviceUtils('')
class DeviceUtilsGetAVDsTest(mock_calls.TestCase):
def testGetAVDs(self):
with self.assertCall(
mock.call.pylib.cmd_helper.GetCmdOutput([mock.ANY, 'list', 'avd']),
'Available Android Virtual Devices:\n'
' Name: my_android5.0\n'
' Path: /some/path/to/.android/avd/my_android5.0.avd\n'
' Target: Android 5.0 (API level 21)\n'
' Tag/ABI: default/x86\n'
' Skin: WVGA800\n'):
self.assertEquals(['my_android5.0'],
device_utils.GetAVDs())
class DeviceUtilsRestartServerTest(mock_calls.TestCase):
@mock.patch('time.sleep', mock.Mock())
def testRestartServer_succeeds(self):
with self.assertCalls(
mock.call.pylib.device.adb_wrapper.AdbWrapper.KillServer(),
(mock.call.pylib.cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb']),
(1, '')),
mock.call.pylib.device.adb_wrapper.AdbWrapper.StartServer(),
(mock.call.pylib.cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb']),
(1, '')),
(mock.call.pylib.cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb']),
(0, '123\n'))):
device_utils.RestartServer()
class MockTempFile(object):
def __init__(self, name='/tmp/some/file'):
self.file = mock.MagicMock(spec=file)
self.file.name = name
self.file.name_quoted = cmd_helper.SingleQuote(name)
def __enter__(self):
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def name(self):
return self.file.name
class _PatchedFunction(object):
def __init__(self, patched=None, mocked=None):
self.patched = patched
self.mocked = mocked
def _AdbWrapperMock(test_serial):
adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
adb.__str__ = mock.Mock(return_value=test_serial)
adb.GetDeviceSerial.return_value = test_serial
return adb
class DeviceUtilsTest(mock_calls.TestCase):
def setUp(self):
self.adb = _AdbWrapperMock('0123456789abcdef')
self.device = device_utils.DeviceUtils(
self.adb, default_timeout=10, default_retries=0)
self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])
def AdbCommandError(self, args=None, output=None, status=None, msg=None):
if args is None:
args = ['[unspecified]']
return mock.Mock(side_effect=device_errors.AdbCommandFailedError(
args, output, status, msg, str(self.device)))
def CommandError(self, msg=None):
if msg is None:
msg = 'Command failed'
return mock.Mock(side_effect=device_errors.CommandFailedError(
msg, str(self.device)))
def ShellError(self, output=None, status=1):
def action(cmd, *args, **kwargs):
raise device_errors.AdbShellCommandFailedError(
cmd, output, status, str(self.device))
if output is None:
output = 'Permission denied\n'
return action
def TimeoutError(self, msg=None):
if msg is None:
msg = 'Operation timed out'
return mock.Mock(side_effect=device_errors.CommandTimeoutError(
msg, str(self.device)))
class DeviceUtilsEqTest(DeviceUtilsTest):
def testEq_equal_deviceUtils(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
self.assertTrue(self.device == other)
self.assertTrue(other == self.device)
def testEq_equal_adbWrapper(self):
other = adb_wrapper.AdbWrapper('0123456789abcdef')
self.assertTrue(self.device == other)
self.assertTrue(other == self.device)
def testEq_equal_string(self):
other = '0123456789abcdef'
self.assertTrue(self.device == other)
self.assertTrue(other == self.device)
def testEq_devicesNotEqual(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdee'))
self.assertFalse(self.device == other)
self.assertFalse(other == self.device)
def testEq_identity(self):
self.assertTrue(self.device == self.device)
def testEq_serialInList(self):
devices = [self.device]
self.assertTrue('0123456789abcdef' in devices)
class DeviceUtilsLtTest(DeviceUtilsTest):
def testLt_lessThan(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff'))
self.assertTrue(self.device < other)
self.assertTrue(other > self.device)
def testLt_greaterThan_lhs(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000'))
self.assertFalse(self.device < other)
self.assertFalse(other > self.device)
def testLt_equal(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
self.assertFalse(self.device < other)
self.assertFalse(other > self.device)
def testLt_sorted(self):
devices = [
device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff')),
device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000')),
]
sorted_devices = sorted(devices)
self.assertEquals('0000000000000000',
sorted_devices[0].adb.GetDeviceSerial())
self.assertEquals('ffffffffffffffff',
sorted_devices[1].adb.GetDeviceSerial())
class DeviceUtilsStrTest(DeviceUtilsTest):
def testStr_returnsSerial(self):
with self.assertCalls(
(self.call.adb.GetDeviceSerial(), '0123456789abcdef')):
self.assertEqual('0123456789abcdef', str(self.device))
class DeviceUtilsIsOnlineTest(DeviceUtilsTest):
def testIsOnline_true(self):
with self.assertCall(self.call.adb.GetState(), 'device'):
self.assertTrue(self.device.IsOnline())
def testIsOnline_false(self):
with self.assertCall(self.call.adb.GetState(), 'offline'):
self.assertFalse(self.device.IsOnline())
def testIsOnline_error(self):
with self.assertCall(self.call.adb.GetState(), self.CommandError()):
self.assertFalse(self.device.IsOnline())
class DeviceUtilsHasRootTest(DeviceUtilsTest):
def testHasRoot_true(self):
with self.assertCall(self.call.adb.Shell('ls /root'), 'foo\n'):
self.assertTrue(self.device.HasRoot())
def testHasRoot_false(self):
with self.assertCall(self.call.adb.Shell('ls /root'), self.ShellError()):
self.assertFalse(self.device.HasRoot())
class DeviceUtilsEnableRootTest(DeviceUtilsTest):
def testEnableRoot_succeeds(self):
with self.assertCalls(
(self.call.device.IsUserBuild(), False),
self.call.adb.Root(),
self.call.device.WaitUntilFullyBooted()):
self.device.EnableRoot()
def testEnableRoot_userBuild(self):
with self.assertCalls(
(self.call.device.IsUserBuild(), True)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.EnableRoot()
def testEnableRoot_rootFails(self):
with self.assertCalls(
(self.call.device.IsUserBuild(), False),
(self.call.adb.Root(), self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.EnableRoot()
class DeviceUtilsIsUserBuildTest(DeviceUtilsTest):
def testIsUserBuild_yes(self):
with self.assertCall(
self.call.device.GetProp('ro.build.type', cache=True), 'user'):
self.assertTrue(self.device.IsUserBuild())
def testIsUserBuild_no(self):
with self.assertCall(
self.call.device.GetProp('ro.build.type', cache=True), 'userdebug'):
self.assertFalse(self.device.IsUserBuild())
class DeviceUtilsGetExternalStoragePathTest(DeviceUtilsTest):
def testGetExternalStoragePath_succeeds(self):
with self.assertCall(
self.call.adb.Shell('echo $EXTERNAL_STORAGE'), '/fake/storage/path\n'):
self.assertEquals('/fake/storage/path',
self.device.GetExternalStoragePath())
def testGetExternalStoragePath_fails(self):
with self.assertCall(self.call.adb.Shell('echo $EXTERNAL_STORAGE'), '\n'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetExternalStoragePath()
class DeviceUtilsGetApplicationPathsTest(DeviceUtilsTest):
def testGetApplicationPaths_exists(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '19\n'),
(self.call.adb.Shell('pm path android'),
'package:/path/to/android.apk\n')):
self.assertEquals(['/path/to/android.apk'],
self.device.GetApplicationPaths('android'))
def testGetApplicationPaths_notExists(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '19\n'),
(self.call.adb.Shell('pm path not.installed.app'), '')):
self.assertEquals([],
self.device.GetApplicationPaths('not.installed.app'))
def testGetApplicationPaths_fails(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '19\n'),
(self.call.adb.Shell('pm path android'),
self.CommandError('ERROR. Is package manager running?\n'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetApplicationPaths('android')
class DeviceUtilsGetApplicationDataDirectoryTest(DeviceUtilsTest):
def testGetApplicationDataDirectory_exists(self):
with self.assertCall(
self.call.device._RunPipedShellCommand(
'pm dump foo.bar.baz | grep dataDir='),
['dataDir=/data/data/foo.bar.baz']):
self.assertEquals(
'/data/data/foo.bar.baz',
self.device.GetApplicationDataDirectory('foo.bar.baz'))
def testGetApplicationDataDirectory_notExists(self):
with self.assertCall(
self.call.device._RunPipedShellCommand(
'pm dump foo.bar.baz | grep dataDir='),
self.ShellError()):
self.assertIsNone(self.device.GetApplicationDataDirectory('foo.bar.baz'))
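# time.sleep is patched out for the boot-wait tests below so the retry loops
# exercised by WaitUntilFullyBooted run without real delays during the test.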
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsWaitUntilFullyBootedTest(DeviceUtilsTest):
def testWaitUntilFullyBooted_succeedsNoWifi(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPaths('android'),
['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1')):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_succeedsWithWifi(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPaths('android'),
['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'),
'stuff\nWi-Fi is enabled\nmore stuff\n')):
self.device.WaitUntilFullyBooted(wifi=True)
def testWaitUntilFullyBooted_deviceNotInitiallyAvailable(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPaths('android'),
['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1')):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_sdCardReadyFails_noPath(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_sdCardReadyFails_notExists(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_devicePmFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPaths('android'), self.CommandError()),
# pm_ready
(self.call.device.GetApplicationPaths('android'), self.CommandError()),
# pm_ready
(self.call.device.GetApplicationPaths('android'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_bootFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPaths('android'),
['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '0'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '0'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_wifiFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPaths('android'),
['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=True)
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsRebootTest(DeviceUtilsTest):
def testReboot_nonBlocking(self):
with self.assertCalls(
self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False)):
self.device.Reboot(block=False)
def testReboot_blocking(self):
with self.assertCalls(
self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=False)):
self.device.Reboot(block=True)
def testReboot_blockUntilWifi(self):
with self.assertCalls(
self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=True)):
self.device.Reboot(block=True, wifi=True)
class DeviceUtilsInstallTest(DeviceUtilsTest):
def testInstall_noPriorInstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPaths('this.is.a.test.package'), []),
self.call.adb.Install('/fake/test/app.apk', reinstall=False)):
self.device.Install('/fake/test/app.apk', retries=0)
def testInstall_differentPriorInstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPaths('this.is.a.test.package'),
['/fake/data/app/this.is.a.test.package.apk']),
(self.call.device._GetChangedAndStaleFiles(
'/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk'),
([('/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk')],
[])),
self.call.adb.Uninstall('this.is.a.test.package'),
self.call.adb.Install('/fake/test/app.apk', reinstall=False)):
self.device.Install('/fake/test/app.apk', retries=0)
def testInstall_differentPriorInstall_reinstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPaths('this.is.a.test.package'),
['/fake/data/app/this.is.a.test.package.apk']),
(self.call.device._GetChangedAndStaleFiles(
'/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk'),
([('/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk')],
[])),
self.call.adb.Install('/fake/test/app.apk', reinstall=True)):
self.device.Install('/fake/test/app.apk', reinstall=True, retries=0)
def testInstall_identicalPriorInstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPaths('this.is.a.test.package'),
['/fake/data/app/this.is.a.test.package.apk']),
(self.call.device._GetChangedAndStaleFiles(
'/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk'),
([], []))):
self.device.Install('/fake/test/app.apk', retries=0)
def testInstall_fails(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPaths('this.is.a.test.package'), []),
(self.call.adb.Install('/fake/test/app.apk', reinstall=False),
self.CommandError('Failure\r\n'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device.Install('/fake/test/app.apk', retries=0)
class DeviceUtilsInstallSplitApkTest(DeviceUtilsTest):
def testInstallSplitApk_noPriorInstall(self):
with self.assertCalls(
(self.call.device._CheckSdkLevel(21)),
(mock.call.pylib.sdk.split_select.SelectSplits(
self.device, 'base.apk',
['split1.apk', 'split2.apk', 'split3.apk']),
['split2.apk']),
(mock.call.pylib.utils.apk_helper.GetPackageName('base.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPaths('this.is.a.test.package'), []),
(self.call.adb.InstallMultiple(
['base.apk', 'split2.apk'], partial=None, reinstall=False))):
self.device.InstallSplitApk('base.apk',
['split1.apk', 'split2.apk', 'split3.apk'], retries=0)
def testInstallSplitApk_partialInstall(self):
with self.assertCalls(
(self.call.device._CheckSdkLevel(21)),
(mock.call.pylib.sdk.split_select.SelectSplits(
self.device, 'base.apk',
['split1.apk', 'split2.apk', 'split3.apk']),
['split2.apk']),
(mock.call.pylib.utils.apk_helper.GetPackageName('base.apk'),
'test.package'),
(self.call.device.GetApplicationPaths('test.package'),
['base-on-device.apk', 'split2-on-device.apk']),
(mock.call.pylib.utils.md5sum.CalculateDeviceMd5Sums(
['base-on-device.apk', 'split2-on-device.apk'], self.device),
{'base-on-device.apk': 'AAA', 'split2-on-device.apk': 'BBB'}),
(mock.call.pylib.utils.md5sum.CalculateHostMd5Sums(
['base.apk', 'split2.apk']),
{'base.apk': 'AAA', 'split2.apk': 'CCC'}),
(self.call.adb.InstallMultiple(
['split2.apk'], partial='test.package', reinstall=True))):
self.device.InstallSplitApk('base.apk',
['split1.apk', 'split2.apk', 'split3.apk'], reinstall=True, retries=0)
class DeviceUtilsRunShellCommandTest(DeviceUtilsTest):
def setUp(self):
super(DeviceUtilsRunShellCommandTest, self).setUp()
self.device.NeedsSU = mock.Mock(return_value=False)
def testRunShellCommand_commandAsList(self):
with self.assertCall(self.call.adb.Shell('pm list packages'), ''):
self.device.RunShellCommand(['pm', 'list', 'packages'])
def testRunShellCommand_commandAsListQuoted(self):
with self.assertCall(self.call.adb.Shell("echo 'hello world' '$10'"), ''):
self.device.RunShellCommand(['echo', 'hello world', '$10'])
def testRunShellCommand_commandAsString(self):
with self.assertCall(self.call.adb.Shell('echo "$VAR"'), ''):
self.device.RunShellCommand('echo "$VAR"')
def testNewRunShellImpl_withEnv(self):
with self.assertCall(
self.call.adb.Shell('VAR=some_string echo "$VAR"'), ''):
self.device.RunShellCommand('echo "$VAR"', env={'VAR': 'some_string'})
def testNewRunShellImpl_withEnvQuoted(self):
with self.assertCall(
self.call.adb.Shell('PATH="$PATH:/other/path" run_this'), ''):
self.device.RunShellCommand('run_this', env={'PATH': '$PATH:/other/path'})
def testNewRunShellImpl_withEnv_failure(self):
with self.assertRaises(KeyError):
self.device.RunShellCommand('some_cmd', env={'INVALID NAME': 'value'})
def testNewRunShellImpl_withCwd(self):
with self.assertCall(self.call.adb.Shell('cd /some/test/path && ls'), ''):
self.device.RunShellCommand('ls', cwd='/some/test/path')
def testNewRunShellImpl_withCwdQuoted(self):
with self.assertCall(
self.call.adb.Shell("cd '/some test/path with/spaces' && ls"), ''):
self.device.RunShellCommand('ls', cwd='/some test/path with/spaces')
def testRunShellCommand_withHugeCmd(self):
payload = 'hi! ' * 1024
expected_cmd = "echo '%s'" % payload
with self.assertCalls(
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEquals([payload],
self.device.RunShellCommand(['echo', payload]))
def testRunShellCommand_withHugeCmdAndSU(self):
payload = 'hi! ' * 1024
expected_cmd = """su -c sh -c 'echo '"'"'%s'"'"''""" % payload
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEquals(
[payload],
self.device.RunShellCommand(['echo', payload], as_root=True))
def testRunShellCommand_withSu(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.adb.Shell("su -c sh -c 'setprop service.adb.root 0'"), '')):
self.device.RunShellCommand('setprop service.adb.root 0', as_root=True)
def testRunShellCommand_manyLines(self):
cmd = 'ls /some/path'
with self.assertCall(self.call.adb.Shell(cmd), 'file1\nfile2\nfile3\n'):
self.assertEquals(['file1', 'file2', 'file3'],
self.device.RunShellCommand(cmd))
def testRunShellCommand_singleLine_success(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), 'some value\n'):
self.assertEquals('some value',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_successEmptyLine(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), '\n'):
self.assertEquals('',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_successWithoutEndLine(self):
cmd = 'echo -n $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), 'some value'):
self.assertEquals('some value',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_successNoOutput(self):
cmd = 'echo -n $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), ''):
self.assertEquals('',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_failTooManyLines(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd),
'some value\nanother value\n'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.RunShellCommand(cmd, single_line=True)
def testRunShellCommand_checkReturn_success(self):
cmd = 'echo $ANDROID_DATA'
output = '/data\n'
with self.assertCall(self.call.adb.Shell(cmd), output):
self.assertEquals([output.rstrip()],
self.device.RunShellCommand(cmd, check_return=True))
def testRunShellCommand_checkReturn_failure(self):
cmd = 'ls /root'
output = 'opendir failed, Permission denied\n'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
with self.assertRaises(device_errors.AdbCommandFailedError):
self.device.RunShellCommand(cmd, check_return=True)
def testRunShellCommand_checkReturn_disabled(self):
cmd = 'ls /root'
output = 'opendir failed, Permission denied\n'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
self.assertEquals([output.rstrip()],
self.device.RunShellCommand(cmd, check_return=False))
def testRunShellCommand_largeOutput_enabled(self):
cmd = 'echo $VALUE'
temp_file = MockTempFile('/sdcard/temp-123')
cmd_redirect = '%s > %s' % (cmd, temp_file.name)
with self.assertCalls(
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(self.adb),
temp_file),
(self.call.adb.Shell(cmd_redirect)),
(self.call.device.ReadFile(temp_file.name, force_pull=True),
'something')):
self.assertEquals(
['something'],
self.device.RunShellCommand(
cmd, large_output=True, check_return=True))
def testRunShellCommand_largeOutput_disabledNoTrigger(self):
cmd = 'something'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError('')):
with self.assertRaises(device_errors.AdbCommandFailedError):
self.device.RunShellCommand(cmd, check_return=True)
def testRunShellCommand_largeOutput_disabledTrigger(self):
cmd = 'echo $VALUE'
temp_file = MockTempFile('/sdcard/temp-123')
cmd_redirect = '%s > %s' % (cmd, temp_file.name)
with self.assertCalls(
(self.call.adb.Shell(cmd), self.ShellError('', None)),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(self.adb),
temp_file),
(self.call.adb.Shell(cmd_redirect)),
(self.call.device.ReadFile(mock.ANY, force_pull=True),
'something')):
self.assertEquals(['something'],
self.device.RunShellCommand(cmd, check_return=True))
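# The tests below exercise _RunPipedShellCommand: it appends
# 'echo "PIPESTATUS: ${PIPESTATUS[@]}"' to the pipeline, strips that sentinel
# line from the returned output, and raises AdbShellCommandFailedError when
# any stage reports a non-zero status or when the sentinel is missing because
# the output was cut off (in which case the exception's status is None).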
class DeviceUtilsRunPipedShellCommandTest(DeviceUtilsTest):
def testRunPipedShellCommand_success(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
check_return=True),
['This line contains foo', 'PIPESTATUS: 0 0']):
self.assertEquals(['This line contains foo'],
self.device._RunPipedShellCommand('ps | grep foo'))
def testRunPipedShellCommand_firstCommandFails(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
check_return=True),
['PIPESTATUS: 1 0']):
with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
self.device._RunPipedShellCommand('ps | grep foo')
self.assertEquals([1, 0], ec.exception.status)
def testRunPipedShellCommand_secondCommandFails(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
check_return=True),
['PIPESTATUS: 0 1']):
with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
self.device._RunPipedShellCommand('ps | grep foo')
self.assertEquals([0, 1], ec.exception.status)
def testRunPipedShellCommand_outputCutOff(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
check_return=True),
['foo.bar'] * 256 + ['foo.ba']):
with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
self.device._RunPipedShellCommand('ps | grep foo')
self.assertIs(None, ec.exception.status)
class DeviceUtilsGetDevicePieWrapper(DeviceUtilsTest):
def testGetDevicePieWrapper_jb(self):
with self.assertCall(
self.call.device.build_version_sdk(),
constants.ANDROID_SDK_VERSION_CODES.JELLY_BEAN):
self.assertEqual('', self.device.GetDevicePieWrapper())
def testGetDevicePieWrapper_ics(self):
with self.assertCalls(
(self.call.device.build_version_sdk(),
constants.ANDROID_SDK_VERSION_CODES.ICE_CREAM_SANDWICH),
(mock.call.pylib.constants.GetOutDirectory(), '/foo/bar'),
(mock.call.os.path.exists(mock.ANY), True),
(self.call.adb.Push(mock.ANY, mock.ANY), '')):
self.assertNotEqual('', self.device.GetDevicePieWrapper())
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsKillAllTest(DeviceUtilsTest):
def testKillAll_noMatchingProcessesFailure(self):
with self.assertCall(self.call.device.GetPids('test_process'), {}):
with self.assertRaises(device_errors.CommandFailedError):
self.device.KillAll('test_process')
def testKillAll_noMatchingProcessesQuiet(self):
with self.assertCall(self.call.device.GetPids('test_process'), {}):
self.assertEqual(0, self.device.KillAll('test_process', quiet=True))
def testKillAll_nonblocking(self):
with self.assertCalls(
(self.call.device.GetPids('some.process'), {'some.process': '1234'}),
(self.call.adb.Shell('kill -9 1234'), '')):
self.assertEquals(
1, self.device.KillAll('some.process', blocking=False))
def testKillAll_blocking(self):
with self.assertCalls(
(self.call.device.GetPids('some.process'), {'some.process': '1234'}),
(self.call.adb.Shell('kill -9 1234'), ''),
(self.call.device.GetPids('some.process'), {'some.process': '1234'}),
(self.call.device.GetPids('some.process'), [])):
self.assertEquals(
1, self.device.KillAll('some.process', blocking=True))
def testKillAll_root(self):
with self.assertCalls(
(self.call.device.GetPids('some.process'), {'some.process': '1234'}),
(self.call.device.NeedsSU(), True),
(self.call.adb.Shell("su -c sh -c 'kill -9 1234'"), '')):
self.assertEquals(
1, self.device.KillAll('some.process', as_root=True))
def testKillAll_sigterm(self):
with self.assertCalls(
(self.call.device.GetPids('some.process'), {'some.process': '1234'}),
(self.call.adb.Shell('kill -15 1234'), '')):
self.assertEquals(
1, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
class DeviceUtilsStartActivityTest(DeviceUtilsTest):
def testStartActivity_actionOnly(self):
test_intent = intent.Intent(action='android.intent.action.VIEW')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_success(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_failure(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Error: Failed to start test activity'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.StartActivity(test_intent)
def testStartActivity_blocking(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-W '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent, blocking=True)
def testStartActivity_withCategory(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
category='android.intent.category.HOME')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-c android.intent.category.HOME '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withMultipleCategories(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
category=['android.intent.category.HOME',
'android.intent.category.BROWSABLE'])
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-c android.intent.category.HOME '
'-c android.intent.category.BROWSABLE '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withData(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
data='http://www.google.com/')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-d http://www.google.com/ '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withStringExtra(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
extras={'foo': 'test'})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'--es foo test'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withBoolExtra(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
extras={'foo': True})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'--ez foo True'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withIntExtra(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
extras={'foo': 123})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'--ei foo 123'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withTraceFile(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'--start-profiler test_trace_file.out '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent,
trace_file_name='test_trace_file.out')
def testStartActivity_withForceStop(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-S '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent, force_stop=True)
def testStartActivity_withFlags(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
flags='0x10000000')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'-f 0x10000000'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
class DeviceUtilsStartInstrumentationTest(DeviceUtilsTest):
def testStartInstrumentation_nothing(self):
with self.assertCalls(
self.call.device.RunShellCommand(
['am', 'instrument', 'test.package/.TestInstrumentation'],
check_return=True, large_output=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False, raw=False, extras=None)
def testStartInstrumentation_finish(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['am', 'instrument', '-w', 'test.package/.TestInstrumentation'],
check_return=True, large_output=True),
['OK (1 test)'])):
output = self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=True, raw=False, extras=None)
self.assertEquals(['OK (1 test)'], output)
def testStartInstrumentation_raw(self):
with self.assertCalls(
self.call.device.RunShellCommand(
['am', 'instrument', '-r', 'test.package/.TestInstrumentation'],
check_return=True, large_output=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False, raw=True, extras=None)
def testStartInstrumentation_extras(self):
with self.assertCalls(
self.call.device.RunShellCommand(
['am', 'instrument', '-e', 'foo', 'Foo', '-e', 'bar', 'Bar',
'test.package/.TestInstrumentation'],
check_return=True, large_output=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False, raw=False, extras={'foo': 'Foo', 'bar': 'Bar'})
class DeviceUtilsBroadcastIntentTest(DeviceUtilsTest):
def testBroadcastIntent_noExtras(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT')
with self.assertCall(
self.call.adb.Shell('am broadcast -a test.package.with.an.INTENT'),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
def testBroadcastIntent_withExtra(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT',
extras={'foo': 'bar value'})
with self.assertCall(
self.call.adb.Shell(
"am broadcast -a test.package.with.an.INTENT --es foo 'bar value'"),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
def testBroadcastIntent_withExtra_noValue(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT',
extras={'foo': None})
with self.assertCall(
self.call.adb.Shell(
'am broadcast -a test.package.with.an.INTENT --esn foo'),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
class DeviceUtilsGoHomeTest(DeviceUtilsTest):
def testGoHome_popupsExist(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
'-c', 'android.intent.category.HOME'], check_return=True),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['input', 'keyevent', '66'], check_return=True)),
(self.call.device.RunShellCommand(
['input', 'keyevent', '4'], check_return=True)),
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True),
['mCurrentFocus Launcher'])):
self.device.GoHome()
def testGoHome_willRetry(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
'-c', 'android.intent.category.HOME'], check_return=True),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['input', 'keyevent', '66'], check_return=True,)),
(self.call.device.RunShellCommand(
['input', 'keyevent', '4'], check_return=True)),
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['input', 'keyevent', '66'], check_return=True)),
(self.call.device.RunShellCommand(
['input', 'keyevent', '4'], check_return=True)),
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.GoHome()
def testGoHome_alreadyFocused(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True),
['mCurrentFocus Launcher']):
self.device.GoHome()
def testGoHome_alreadyFocusedAlternateCase(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True),
[' mCurrentFocus .launcher/.']):
self.device.GoHome()
def testGoHome_obtainsFocusAfterGoingHome(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
'-c', 'android.intent.category.HOME'], check_return=True),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True,
large_output=True),
['mCurrentFocus Launcher'])):
self.device.GoHome()
class DeviceUtilsForceStopTest(DeviceUtilsTest):
def testForceStop(self):
with self.assertCall(
self.call.adb.Shell('am force-stop this.is.a.test.package'),
''):
self.device.ForceStop('this.is.a.test.package')
class DeviceUtilsClearApplicationStateTest(DeviceUtilsTest):
def testClearApplicationState_packageDoesntExist(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '17\n'),
(self.call.device.GetApplicationPaths('this.package.does.not.exist'),
[])):
self.device.ClearApplicationState('this.package.does.not.exist')
def testClearApplicationState_packageDoesntExistOnAndroidJBMR2OrAbove(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '18\n'),
(self.call.adb.Shell('pm clear this.package.does.not.exist'),
'Failed\r\n')):
self.device.ClearApplicationState('this.package.does.not.exist')
def testClearApplicationState_packageExists(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '17\n'),
(self.call.device.GetApplicationPaths('this.package.exists'),
['/data/app/this.package.exists.apk']),
(self.call.adb.Shell('pm clear this.package.exists'),
'Success\r\n')):
self.device.ClearApplicationState('this.package.exists')
def testClearApplicationState_packageExistsOnAndroidJBMR2OrAbove(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '18\n'),
(self.call.adb.Shell('pm clear this.package.exists'),
'Success\r\n')):
self.device.ClearApplicationState('this.package.exists')
class DeviceUtilsSendKeyEventTest(DeviceUtilsTest):
def testSendKeyEvent(self):
with self.assertCall(self.call.adb.Shell('input keyevent 66'), ''):
self.device.SendKeyEvent(66)
class DeviceUtilsPushChangedFilesIndividuallyTest(DeviceUtilsTest):
def testPushChangedFilesIndividually_empty(self):
test_files = []
with self.assertCalls():
self.device._PushChangedFilesIndividually(test_files)
def testPushChangedFilesIndividually_single(self):
test_files = [('/test/host/path', '/test/device/path')]
with self.assertCalls(self.call.adb.Push(*test_files[0])):
self.device._PushChangedFilesIndividually(test_files)
def testPushChangedFilesIndividually_multiple(self):
test_files = [
('/test/host/path/file1', '/test/device/path/file1'),
('/test/host/path/file2', '/test/device/path/file2')]
with self.assertCalls(
self.call.adb.Push(*test_files[0]),
self.call.adb.Push(*test_files[1])):
self.device._PushChangedFilesIndividually(test_files)
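# The zipped push path exercised below builds the archive in a separate
# multiprocessing.Process on the host, pushes it to the device's external
# storage, unzips it as root with /data/local/tmp/bin prepended to PATH,
# checks the device is still online, and then removes the archive.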
class DeviceUtilsPushChangedFilesZippedTest(DeviceUtilsTest):
def testPushChangedFilesZipped_empty(self):
test_files = []
with self.assertCalls():
self.device._PushChangedFilesZipped(test_files)
def _testPushChangedFilesZipped_spec(self, test_files):
mock_zip_temp = mock.mock_open()
mock_zip_temp.return_value.name = '/test/temp/file/tmp.zip'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(suffix='.zip'), mock_zip_temp),
(mock.call.multiprocessing.Process(
target=device_utils.DeviceUtils._CreateDeviceZip,
args=('/test/temp/file/tmp.zip', test_files)), mock.Mock()),
(self.call.device.GetExternalStoragePath(),
'/test/device/external_dir'),
self.call.adb.Push(
'/test/temp/file/tmp.zip', '/test/device/external_dir/tmp.zip'),
self.call.device.RunShellCommand(
['unzip', '/test/device/external_dir/tmp.zip'],
as_root=True,
env={'PATH': '/data/local/tmp/bin:$PATH'},
check_return=True),
(self.call.device.IsOnline(), True),
self.call.device.RunShellCommand(
['rm', '/test/device/external_dir/tmp.zip'], check_return=True)):
self.device._PushChangedFilesZipped(test_files)
def testPushChangedFilesZipped_single(self):
self._testPushChangedFilesZipped_spec(
[('/test/host/path/file1', '/test/device/path/file1')])
def testPushChangedFilesZipped_multiple(self):
self._testPushChangedFilesZipped_spec(
[('/test/host/path/file1', '/test/device/path/file1'),
('/test/host/path/file2', '/test/device/path/file2')])
class DeviceUtilsFileExistsTest(DeviceUtilsTest):
def testFileExists_usingTest_fileExists(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/path/file.exists'], check_return=True), ''):
self.assertTrue(self.device.FileExists('/path/file.exists'))
def testFileExists_usingTest_fileDoesntExist(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/does/not/exist'], check_return=True),
self.ShellError('', 1)):
self.assertFalse(self.device.FileExists('/does/not/exist'))
class DeviceUtilsPullFileTest(DeviceUtilsTest):
def testPullFile_existsOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCall(
self.call.adb.Pull('/data/app/test.file.exists',
'/test/file/host/path')):
self.device.PullFile('/data/app/test.file.exists',
'/test/file/host/path')
def testPullFile_doesntExistOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCall(
self.call.adb.Pull('/data/app/test.file.does.not.exist',
'/test/file/host/path'),
self.CommandError('remote object does not exist')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.PullFile('/data/app/test.file.does.not.exist',
'/test/file/host/path')
class DeviceUtilsReadFileTest(DeviceUtilsTest):
def testReadFileWithPull_success(self):
tmp_host_dir = '/tmp/dir/on.host/'
tmp_host = MockTempFile('/tmp/dir/on.host/tmp_ReadFileWithPull')
tmp_host.file.read.return_value = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY)),
(mock.call.__builtin__.open(mock.ANY, 'r'), tmp_host),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
self.assertEquals('some interesting contents',
self.device._ReadFileWithPull('/path/to/device/file'))
tmp_host.file.read.assert_called_once_with()
def testReadFileWithPull_rejected(self):
tmp_host_dir = '/tmp/dir/on.host/'
with self.assertCalls(
(mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY),
self.CommandError()),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
with self.assertRaises(device_errors.CommandFailedError):
self.device._ReadFileWithPull('/path/to/device/file')
def testReadFile_exists(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/read/this/test/file'],
as_root=False, check_return=True),
['-rw-rw---- root foo 256 1970-01-01 00:00 file']),
(self.call.device.RunShellCommand(
['cat', '/read/this/test/file'],
as_root=False, check_return=True),
['this is a test file'])):
self.assertEqual('this is a test file\n',
self.device.ReadFile('/read/this/test/file'))
def testReadFile_doesNotExist(self):
with self.assertCall(
self.call.device.RunShellCommand(
['ls', '-l', '/this/file/does.not.exist'],
as_root=False, check_return=True),
self.CommandError('File does not exist')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.ReadFile('/this/file/does.not.exist')
def testReadFile_zeroSize(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/this/file/has/zero/size'],
as_root=False, check_return=True),
['-r--r--r-- root foo 0 1970-01-01 00:00 zero_size_file']),
(self.call.device._ReadFileWithPull('/this/file/has/zero/size'),
'but it has contents\n')):
self.assertEqual('but it has contents\n',
self.device.ReadFile('/this/file/has/zero/size'))
def testReadFile_withSU(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/this/file/can.be.read.with.su'],
as_root=True, check_return=True),
['-rw------- root root 256 1970-01-01 00:00 can.be.read.with.su']),
(self.call.device.RunShellCommand(
['cat', '/this/file/can.be.read.with.su'],
as_root=True, check_return=True),
['this is a test file', 'read with su'])):
self.assertEqual(
'this is a test file\nread with su\n',
self.device.ReadFile('/this/file/can.be.read.with.su',
as_root=True))
def testReadFile_withPull(self):
contents = 'a' * 123456
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/read/this/big/test/file'],
as_root=False, check_return=True),
['-rw-rw---- root foo 123456 1970-01-01 00:00 file']),
(self.call.device._ReadFileWithPull('/read/this/big/test/file'),
contents)):
self.assertEqual(
contents, self.device.ReadFile('/read/this/big/test/file'))
def testReadFile_withPullAndSU(self):
contents = 'b' * 123456
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/this/big/file/can.be.read.with.su'],
as_root=True, check_return=True),
['-rw------- root root 123456 1970-01-01 00:00 can.be.read.with.su']),
(self.call.device.NeedsSU(), True),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device.RunShellCommand(
['cp', '/this/big/file/can.be.read.with.su',
'/sdcard/tmp/on.device'],
as_root=True, check_return=True),
(self.call.device._ReadFileWithPull('/sdcard/tmp/on.device'),
contents)):
self.assertEqual(
contents,
self.device.ReadFile('/this/big/file/can.be.read.with.su',
as_root=True))
def testReadFile_forcePull(self):
contents = 'a' * 123456
with self.assertCall(
self.call.device._ReadFileWithPull('/read/this/big/test/file'),
contents):
self.assertEqual(
contents,
self.device.ReadFile('/read/this/big/test/file', force_pull=True))
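# WriteFile, covered below, picks between two strategies: short contents are
# written with an 'echo -n ... > file' shell command (via su when as_root),
# while long contents or force_push go through _WriteFileWithPush, which
# stages the data in a host temp file and adb-pushes it, copying through a
# device temp file with cp when root is needed.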
class DeviceUtilsWriteFileTest(DeviceUtilsTest):
def testWriteFileWithPush_success(self):
tmp_host = MockTempFile('/tmp/file/on.host')
contents = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(), tmp_host),
self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file')):
self.device._WriteFileWithPush('/path/to/device/file', contents)
tmp_host.file.write.assert_called_once_with(contents)
def testWriteFileWithPush_rejected(self):
tmp_host = MockTempFile('/tmp/file/on.host')
contents = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(), tmp_host),
(self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file'),
self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device._WriteFileWithPush('/path/to/device/file', contents)
def testWriteFile_withPush(self):
contents = 'some large contents ' * 26 # 20 * 26 = 520 chars
with self.assertCalls(
self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
self.device.WriteFile('/path/to/device/file', contents)
def testWriteFile_withPushForced(self):
contents = 'tiny contents'
with self.assertCalls(
self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
self.device.WriteFile('/path/to/device/file', contents, force_push=True)
def testWriteFile_withPushAndSU(self):
contents = 'some large contents ' * 26 # 20 * 26 = 520 chars
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device._WriteFileWithPush('/sdcard/tmp/on.device', contents),
self.call.device.RunShellCommand(
['cp', '/sdcard/tmp/on.device', '/path/to/device/file'],
as_root=True, check_return=True)):
self.device.WriteFile('/path/to/device/file', contents, as_root=True)
def testWriteFile_withEcho(self):
with self.assertCall(self.call.adb.Shell(
"echo -n the.contents > /test/file/to.write"), ''):
self.device.WriteFile('/test/file/to.write', 'the.contents')
def testWriteFile_withEchoAndQuotes(self):
with self.assertCall(self.call.adb.Shell(
"echo -n 'the contents' > '/test/file/to write'"), ''):
self.device.WriteFile('/test/file/to write', 'the contents')
def testWriteFile_withEchoAndSU(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.adb.Shell("su -c sh -c 'echo -n contents > /test/file'"),
'')):
self.device.WriteFile('/test/file', 'contents', as_root=True)
class DeviceUtilsLsTest(DeviceUtilsTest):
def testLs_directory(self):
result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp'), result)):
self.assertEquals(result,
self.device.Ls('/data/local/tmp'))
def testLs_nothing(self):
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp/testfile.txt'), [])):
self.assertEquals([],
self.device.Ls('/data/local/tmp/testfile.txt'))
class DeviceUtilsStatTest(DeviceUtilsTest):
def testStat_file(self):
result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp'), result)):
self.assertEquals(adb_wrapper.DeviceStat(33206, 3, 1417436122),
self.device.Stat('/data/local/tmp/testfile.txt'))
def testStat_directory(self):
result = [('.', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('tmp', adb_wrapper.DeviceStat(16889, 4096, 1417436123))]
with self.assertCalls(
(self.call.adb.Ls('/data/local'), result)):
self.assertEquals(adb_wrapper.DeviceStat(16889, 4096, 1417436123),
self.device.Stat('/data/local/tmp'))
def testStat_doesNotExist(self):
result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp'), result)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.Stat('/data/local/tmp/does.not.exist.txt')
class DeviceUtilsSetJavaAssertsTest(DeviceUtilsTest):
def testSetJavaAsserts_enable(self):
with self.assertCalls(
(self.call.device.ReadFile(constants.DEVICE_LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'),
self.call.device.WriteFile(
constants.DEVICE_LOCAL_PROPERTIES_PATH,
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'
'dalvik.vm.enableassertions=all\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), ''),
self.call.device.SetProp('dalvik.vm.enableassertions', 'all')):
self.assertTrue(self.device.SetJavaAsserts(True))
def testSetJavaAsserts_disable(self):
with self.assertCalls(
(self.call.device.ReadFile(constants.DEVICE_LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
self.call.device.WriteFile(
constants.DEVICE_LOCAL_PROPERTIES_PATH,
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all'),
self.call.device.SetProp('dalvik.vm.enableassertions', '')):
self.assertTrue(self.device.SetJavaAsserts(False))
def testSetJavaAsserts_alreadyEnabled(self):
with self.assertCalls(
(self.call.device.ReadFile(constants.DEVICE_LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
self.assertFalse(self.device.SetJavaAsserts(True))
class DeviceUtilsGetPropTest(DeviceUtilsTest):
def testGetProp_exists(self):
with self.assertCall(
self.call.adb.Shell('getprop test.property'), 'property_value\n'):
self.assertEqual('property_value',
self.device.GetProp('test.property'))
def testGetProp_doesNotExist(self):
with self.assertCall(
self.call.adb.Shell('getprop property.does.not.exist'), '\n'):
self.assertEqual('', self.device.GetProp('property.does.not.exist'))
def testGetProp_cachedRoProp(self):
with self.assertCall(
self.call.adb.Shell('getprop ro.build.type'), 'userdebug\n'):
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type', cache=True))
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type', cache=True))
def testGetProp_retryAndCache(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.type'), self.ShellError()),
(self.call.adb.Shell('getprop ro.build.type'), self.ShellError()),
(self.call.adb.Shell('getprop ro.build.type'), 'userdebug\n')):
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type',
cache=True, retries=3))
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type',
cache=True, retries=3))
class DeviceUtilsSetPropTest(DeviceUtilsTest):
def testSetProp(self):
with self.assertCall(
self.call.adb.Shell("setprop test.property 'test value'"), ''):
self.device.SetProp('test.property', 'test value')
def testSetProp_check_succeeds(self):
with self.assertCalls(
(self.call.adb.Shell('setprop test.property new_value'), ''),
(self.call.adb.Shell('getprop test.property'), 'new_value')):
self.device.SetProp('test.property', 'new_value', check=True)
def testSetProp_check_fails(self):
with self.assertCalls(
(self.call.adb.Shell('setprop test.property new_value'), ''),
(self.call.adb.Shell('getprop test.property'), 'old_value')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.SetProp('test.property', 'new_value', check=True)
class DeviceUtilsGetPidsTest(DeviceUtilsTest):
def testGetPids_noMatches(self):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F does.not.match'),
[]):
self.assertEqual({}, self.device.GetPids('does.not.match'))
def testGetPids_oneMatch(self):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
['user 1001 100 1024 1024 ffffffff 00000000 one.match']):
self.assertEqual({'one.match': '1001'}, self.device.GetPids('one.match'))
def testGetPids_multipleMatches(self):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F match'),
['user 1001 100 1024 1024 ffffffff 00000000 one.match',
'user 1002 100 1024 1024 ffffffff 00000000 two.match',
'user 1003 100 1024 1024 ffffffff 00000000 three.match']):
self.assertEqual(
{'one.match': '1001', 'two.match': '1002', 'three.match': '1003'},
self.device.GetPids('match'))
def testGetPids_exactMatch(self):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F exact.match'),
['user 1000 100 1024 1024 ffffffff 00000000 not.exact.match',
'user 1234 100 1024 1024 ffffffff 00000000 exact.match']):
self.assertEqual(
{'not.exact.match': '1000', 'exact.match': '1234'},
self.device.GetPids('exact.match'))
def testGetPids_quotable(self):
with self.assertCall(
self.call.device._RunPipedShellCommand("ps | grep -F 'my$process'"),
['user 1234 100 1024 1024 ffffffff 00000000 my$process']):
self.assertEqual(
{'my$process': '1234'}, self.device.GetPids('my$process'))
class DeviceUtilsTakeScreenshotTest(DeviceUtilsTest):
def testTakeScreenshot_fileNameProvided(self):
with self.assertCalls(
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(
self.adb, suffix='.png'),
MockTempFile('/tmp/path/temp-123.png')),
(self.call.adb.Shell('/system/bin/screencap -p /tmp/path/temp-123.png'),
''),
self.call.device.PullFile('/tmp/path/temp-123.png',
'/test/host/screenshot.png')):
self.device.TakeScreenshot('/test/host/screenshot.png')
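# GetMemoryUsageForPid, tested below, parses the leading columns of the
# 'showmap <pid>' TOTAL row into Size, Rss, Pss, Shared_Clean, Shared_Dirty,
# Private_Clean and Private_Dirty, and adds VmHWM read separately from
# /proc/<pid>/status; either source may be unavailable without failing.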
class DeviceUtilsGetMemoryUsageForPidTest(DeviceUtilsTest):
def setUp(self):
super(DeviceUtilsGetMemoryUsageForPidTest, self).setUp()
def testGetMemoryUsageForPid_validPid(self):
with self.assertCalls(
(self.call.device._RunPipedShellCommand(
'showmap 1234 | grep TOTAL', as_root=True),
['100 101 102 103 104 105 106 107 TOTAL']),
(self.call.device.ReadFile('/proc/1234/status', as_root=True),
'VmHWM: 1024 kB\n')):
self.assertEqual(
{
'Size': 100,
'Rss': 101,
'Pss': 102,
'Shared_Clean': 103,
'Shared_Dirty': 104,
'Private_Clean': 105,
'Private_Dirty': 106,
'VmHWM': 1024
},
self.device.GetMemoryUsageForPid(1234))
def testGetMemoryUsageForPid_noSmaps(self):
with self.assertCalls(
(self.call.device._RunPipedShellCommand(
'showmap 4321 | grep TOTAL', as_root=True),
['cannot open /proc/4321/smaps: No such file or directory']),
(self.call.device.ReadFile('/proc/4321/status', as_root=True),
'VmHWM: 1024 kb\n')):
self.assertEquals({'VmHWM': 1024}, self.device.GetMemoryUsageForPid(4321))
def testGetMemoryUsageForPid_noStatus(self):
with self.assertCalls(
(self.call.device._RunPipedShellCommand(
'showmap 4321 | grep TOTAL', as_root=True),
['100 101 102 103 104 105 106 107 TOTAL']),
(self.call.device.ReadFile('/proc/4321/status', as_root=True),
self.CommandError())):
self.assertEquals(
{
'Size': 100,
'Rss': 101,
'Pss': 102,
'Shared_Clean': 103,
'Shared_Dirty': 104,
'Private_Clean': 105,
'Private_Dirty': 106,
},
self.device.GetMemoryUsageForPid(4321))
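# GetClientCache, tested below, hands each named client its own dict stored
# alongside the device's internal cache; asking for the same client name again
# returns the same dict, and _ClearCache empties the device cache and every
# client cache at once.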
class DeviceUtilsClientCache(DeviceUtilsTest):
def testClientCache_twoCaches(self):
self.device._cache['test'] = 0
client_cache_one = self.device.GetClientCache('ClientOne')
client_cache_one['test'] = 1
client_cache_two = self.device.GetClientCache('ClientTwo')
client_cache_two['test'] = 2
self.assertEqual(self.device._cache, {'test': 0})
self.assertEqual(client_cache_one, {'test': 1})
self.assertEqual(client_cache_two, {'test': 2})
self.device._ClearCache()
self.assertEqual(self.device._cache, {})
self.assertEqual(client_cache_one, {})
self.assertEqual(client_cache_two, {})
def testClientCache_multipleInstances(self):
client_cache_one = self.device.GetClientCache('ClientOne')
client_cache_one['test'] = 1
client_cache_two = self.device.GetClientCache('ClientOne')
self.assertEqual(client_cache_one, {'test': 1})
self.assertEqual(client_cache_two, {'test': 1})
self.device._ClearCache()
self.assertEqual(client_cache_one, {})
self.assertEqual(client_cache_two, {})
class DeviceUtilsParallelTest(mock_calls.TestCase):
def testParallel_default(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCall(
mock.call.pylib.device.device_utils.DeviceUtils.HealthyDevices(),
[device_utils.DeviceUtils(s) for s in test_serials]):
parallel_devices = device_utils.DeviceUtils.parallel()
for serial, device in zip(test_serials, parallel_devices.pGet(None)):
self.assertTrue(isinstance(device, device_utils.DeviceUtils))
self.assertEquals(serial, device.adb.GetDeviceSerial())
def testParallel_noDevices(self):
with self.assertCall(
mock.call.pylib.device.device_utils.DeviceUtils.HealthyDevices(), []):
with self.assertRaises(device_errors.NoDevicesError):
device_utils.DeviceUtils.parallel()
class DeviceUtilsHealthyDevicesTest(mock_calls.TestCase):
def _createAdbWrapperMock(self, serial, is_ready=True):
adb = _AdbWrapperMock(serial)
adb.is_ready = is_ready
return adb
def testHealthyDevices_default(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.pylib.device.device_blacklist.ReadBlacklist(), []),
(mock.call.pylib.device.adb_wrapper.AdbWrapper.Devices(),
[self._createAdbWrapperMock(s) for s in test_serials])):
devices = device_utils.DeviceUtils.HealthyDevices()
for serial, device in zip(test_serials, devices):
self.assertTrue(isinstance(device, device_utils.DeviceUtils))
self.assertEquals(serial, device.adb.GetDeviceSerial())
def testHealthyDevices_blacklisted(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.pylib.device.device_blacklist.ReadBlacklist(),
['fedcba9876543210']),
(mock.call.pylib.device.adb_wrapper.AdbWrapper.Devices(),
[self._createAdbWrapperMock(s) for s in test_serials])):
devices = device_utils.DeviceUtils.HealthyDevices()
self.assertEquals(1, len(devices))
self.assertTrue(isinstance(devices[0], device_utils.DeviceUtils))
self.assertEquals('0123456789abcdef', devices[0].adb.GetDeviceSerial())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# Author: Matthew Good <matt@matt-good.net>
from datetime import datetime
import os
from pkg_resources import resource_filename
import re
from trac.core import *
from trac.config import ListOption, Option
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.prefs import IPreferencePanelProvider
from trac.util import get_pkginfo
from trac.util.datefmt import http_date, localtz
from trac.util.translation import _
from trac.web.api import IRequestHandler, HTTPNotFound
from trac.web.chrome import add_notice, add_stylesheet
from genshi import QName, Stream
from genshi.core import Attrs, START, END, TEXT
# Kludge to work around the lack of absolute imports in Python versions
# prior to 2.5
pygments = __import__('pygments', {}, {}, ['lexers', 'styles', 'formatters'])
get_all_lexers = pygments.lexers.get_all_lexers
get_lexer_by_name = pygments.lexers.get_lexer_by_name
HtmlFormatter = pygments.formatters.html.HtmlFormatter
get_all_styles = pygments.styles.get_all_styles
get_style_by_name = pygments.styles.get_style_by_name
__all__ = ['PygmentsRenderer']
class PygmentsRenderer(Component):
"""HTML renderer for syntax highlighting based on Pygments."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer,
IPreferencePanelProvider, IRequestHandler)
default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
"""The default style to use for Pygments syntax highlighting.""")
pygments_modes = ListOption('mimeviewer', 'pygments_modes',
'', doc=
"""List of additional MIME types known by Pygments.
For each, a tuple `mimetype:mode:quality` has to be
specified, where `mimetype` is the MIME type,
`mode` is the corresponding Pygments mode to be used
for the conversion and `quality` is the quality ratio
associated to this conversion. That can also be used
to override the default quality ratio used by the
        Pygments renderer.""")
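    # Illustrative trac.ini entry for this option (a sketch; the MIME type,
    # mode name, and quality ratio below are examples only):
    #   [mimeviewer]
    #   pygments_modes = text/x-mylang:mylang:8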
expand_tabs = True
returns_source = True
QUALITY_RATIO = 7
EXAMPLE = """<!DOCTYPE html>
<html lang="en">
<head>
<title>Hello, world!</title>
<script>
jQuery(document).ready(function($) {
$("h1").fadeIn("slow");
});
</script>
</head>
<body>
<h1>Hello, world!</h1>
</body>
</html>"""
def __init__(self):
self._types = None
# ISystemInfoProvider methods
def get_system_info(self):
version = get_pkginfo(pygments).get('version')
# if installed from source, fallback to the hardcoded version info
if not version and hasattr(pygments, '__version__'):
version = pygments.__version__
yield 'Pygments', version
# IHTMLPreviewRenderer methods
def get_quality_ratio(self, mimetype):
# Extend default MIME type to mode mappings with configured ones
if self._types is None:
self._init_types()
try:
return self._types[mimetype][1]
except KeyError:
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
req = context.req
if self._types is None:
self._init_types()
add_stylesheet(req, '/pygments/%s.css' %
req.session.get('pygments_style', self.default_style))
try:
if len(content) > 0:
mimetype = mimetype.split(';', 1)[0]
language = self._types[mimetype][0]
return self._generate(language, content)
except (KeyError, ValueError):
raise Exception("No Pygments lexer found for mime-type '%s'."
% mimetype)
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
yield ('pygments', _('Syntax Highlighting'))
def render_preference_panel(self, req, panel):
styles = list(get_all_styles())
if req.method == 'POST':
style = req.args.get('style')
if style and style in styles:
req.session['pygments_style'] = style
add_notice(req, _('Your preferences have been saved.'))
req.redirect(req.href.prefs(panel or None))
output = self._generate('html', self.EXAMPLE)
return 'prefs_pygments.html', {
'output': output,
'selection': req.session.get('pygments_style', self.default_style),
'styles': styles
}
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/pygments/(\w+)\.css', req.path_info)
if match:
req.args['style'] = match.group(1)
return True
def process_request(self, req):
style = req.args['style']
try:
style_cls = get_style_by_name(style)
except ValueError, e:
raise HTTPNotFound(e)
parts = style_cls.__module__.split('.')
filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
last_modified = http_date(mtime)
if last_modified == req.get_header('If-Modified-Since'):
req.send_response(304)
req.end_headers()
return
formatter = HtmlFormatter(style=style_cls)
content = u'\n\n'.join([
formatter.get_style_defs('div.code pre'),
formatter.get_style_defs('table.code td')
]).encode('utf-8')
req.send_response(200)
req.send_header('Content-Type', 'text/css; charset=utf-8')
req.send_header('Last-Modified', last_modified)
req.send_header('Content-Length', len(content))
req.write(content)
# Internal methods
def _init_types(self):
self._types = {}
for lexname, aliases, _, mimetypes in get_all_lexers():
name = aliases[0] if aliases else lexname
for mimetype in mimetypes:
self._types[mimetype] = (name, self.QUALITY_RATIO)
# Pygments currently doesn't know application/javascript
if 'application/javascript' not in self._types:
js_entry = self._types.get('text/javascript')
if js_entry:
self._types['application/javascript'] = js_entry
self._types.update(
Mimeview(self.env).configured_modes_mapping('pygments')
)
def _generate(self, language, content):
lexer = get_lexer_by_name(language, stripnl=False)
return GenshiHtmlFormatter().generate(lexer.get_tokens(content))
class GenshiHtmlFormatter(HtmlFormatter):
"""A Pygments formatter subclass that generates a Python stream instead
of writing markup as strings to an output file.
"""
def _chunk(self, tokens):
"""Groups tokens with the same CSS class in the token stream
and yields them one by one, along with the CSS class, with the
values chunked together."""
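        # Example (illustrative): consecutive tokens that map to the same CSS
        # class, e.g. (Comment, u'# foo') followed by (Comment, u'bar'), are
        # merged and yielded as a single ('c', u'# foobar') chunk.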
last_class = None
text = []
for ttype, value in tokens:
c = self._get_css_class(ttype)
if c == 'n':
c = ''
if c == last_class:
text.append(value)
continue
# If no value, leave the old <span> open.
if value:
yield last_class, u''.join(text)
text = [value]
last_class = c
if text:
yield last_class, u''.join(text)
def generate(self, tokens):
pos = (None, -1, -1)
span = QName('span')
class_ = QName('class')
def _generate():
for c, text in self._chunk(tokens):
if c:
attrs = Attrs([(class_, c)])
yield START, (span, attrs), pos
yield TEXT, text, pos
yield END, span, pos
else:
yield TEXT, text, pos
return Stream(_generate())
|
|
#! /usr/bin/python
import os, sys, re, ConfigParser
from ldap_helper import ldap_connect, get_ldap_attribute
from sh_helper import run_command, prompt_user
from cgi import escape
from subprocess import Popen, PIPE, STDOUT
import shlex
doc_root = {'hg.mozilla.org': '/repo_local/mozilla',
'hg.ecmascript.org': '/repo_local/ecma/mozilla'}
verbose_users = [ 'bkero@mozilla.com2', ]
def is_valid_user (mail):
mail = mail.strip()
    ## If the regex search below fails, comment out the conditional and the return, then uncomment the following line to at least sanitize the input
mail = mail.replace("(",'').replace(")",'').replace("'",'').replace('"','').replace(';','').replace("\"",'')
#if not re.search("^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$", mail):
# return 'Invalid Email Address'
account_status = get_ldap_attribute (mail, 'hgAccountEnabled', 'ldap://ldap.db.scl3.mozilla.com')
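    # hgAccountEnabled maps to the return value: 'TRUE' -> 1 (enabled),
    # 'FALSE' -> 2 (disabled), anything else -> 0 (unknown user)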
if account_status == 'TRUE':
return 1
elif account_status == 'FALSE':
return 2
else:
return 0
#
# Please be very careful when you relax/change the good_chars regular expression.
# Being lax with it can open us up to all kind of security problems.
#
def check_repo_name (repo_name):
good_chars = re.compile ('^(\w|-|/|\.\w)+\s*$')
if not good_chars.search (repo_name):
        sys.stderr.write ('Only alpha-numeric characters, "_", "-", "/", and "." are allowed in repository names.\n')
sys.stderr.write ('Please try again with only those characters.\n')
sys.exit (1)
return True
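# Illustrative examples: check_repo_name accepts names such as
# "users/foo_bar/my-repo" or "build/tools-1.0", and rejects anything with
# embedded spaces, "..", or shell metacharacters like ";" and "&".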
def run_hg_clone (cname, user_repo_dir, repo_name, source_repo_path, verbose=False):
global doc_root
userdir = "%s/users/%s" % (doc_root[cname], user_repo_dir)
dest_dir = "%s/%s" % (userdir, repo_name)
dest_url = "/users/%s/%s" % (user_repo_dir, repo_name)
if os.path.exists (dest_dir):
print 'Sorry, you already have a repo called %s' % repo_name
print 'If you think this is wrong, please file an IT bug'
sys.exit (1)
else:
if (os.path.exists ('%s/%s' % (doc_root[cname], source_repo_path))) and (check_repo_name (source_repo_path)):
if not os.path.exists (userdir):
run_command ('mkdir %s' % userdir)
print 'Please wait. Cloning /%s to %s' % (source_repo_path, dest_url)
if(verbose):
run_command ('nohup /usr/bin/hg clone --debug --verbose --time --pull -U %s/%s %s' %
(doc_root[cname], source_repo_path, dest_dir),
verbose=True)
else:
run_command ('nohup /usr/bin/hg clone --pull -U %s/%s %s' %
(doc_root[cname], source_repo_path, dest_dir))
print "Clone complete."
else:
print 'Sorry, there is no source repo called %s.' % source_repo_path
print 'If you think this is wrong, please file an IT bug'
sys.exit (1)
def make_wsgi_dir (cname, user_repo_dir):
global doc_root
wsgi_dir = "/repo_local/mozilla/webroot_wsgi/users/%s" % user_repo_dir
# Create user's webroot_wsgi folder if it doesn't already exist
if not os.path.isdir(wsgi_dir):
os.mkdir(wsgi_dir)
print "Creating hgweb.config file"
# Create hgweb.config file if it doesn't already exist
if not os.path.isfile("%s/hgweb.config" % wsgi_dir):
#try:
hgconfig = open("%s/hgweb.config" % wsgi_dir, "w")
#except:
# print("Problem opening hgweb.config file, please file a bug against IT and pastebin this error.")
hgconfig.write("[web]\n")
hgconfig.write("baseurl = http://%s/users/%s\n" % (cname, user_repo_dir))
hgconfig.write("[paths]\n")
hgconfig.write("/ = %s/users/%s/*\n" % (doc_root[cname], user_repo_dir))
hgconfig.close()
# Create hgweb.wsgi file if it doesn't already exist
if not os.path.isfile("%s/hgweb.wsgi" % wsgi_dir):
try:
hgwsgi = open("%s/hgweb.wsgi" % wsgi_dir, "w")
except:
print("Problem opening hweb.wsgi file, please file an IT bug with this error.")
hgwsgi.write("#!/usr/bin/env python\n")
hgwsgi.write("config = '%s/hgweb.config'\n" % wsgi_dir)
hgwsgi.write("from mercurial import demandimport; demandimport.enable()\n")
hgwsgi.write("from mercurial.hgweb import hgweb\n")
hgwsgi.write("import os\n")
hgwsgi.write("os.environ['HGENCODING'] = 'UTF-8'\n")
hgwsgi.write("application = hgweb(config)\n")
hgwsgi.close()
def fix_user_repo_perms (cname, repo_name):
global doc_root
user = os.getenv ('USER')
user_repo_dir = user.replace ('@', '_')
print "Fixing permissions, don't interrupt."
try:
run_command ('chown %s:scm_level_1 %s/users/%s' % (user, doc_root[cname], user_repo_dir))
run_command ('chmod g+w %s/users/%s' % (doc_root[cname], user_repo_dir))
run_command ('chmod g+s %s/users/%s' % (doc_root[cname], user_repo_dir))
run_command ('chown -R %s:scm_level_1 %s/users/%s/%s' % (user, doc_root[cname], user_repo_dir, repo_name))
run_command ('chmod -R g+w %s/users/%s/%s' % (doc_root[cname], user_repo_dir, repo_name))
run_command ('find %s/users/%s/%s -depth -type d | xargs chmod g+s' % (doc_root[cname], user_repo_dir, repo_name))
except Exception, e:
print "Exception %s" % (e)
def make_repo_clone (cname, repo_name, quick_src, verbose=False, source_repo=''):
global doc_root
user = os.getenv ('USER')
user_repo_dir = user.replace ('@', '_')
dest_url = "/users/%s" % user_repo_dir
source_repo = ''
if quick_src:
if(user in verbose_users):
verbose=True
run_hg_clone (cname, user_repo_dir, repo_name, quick_src, True)
else:
run_hg_clone (cname, user_repo_dir, repo_name, quick_src)
fix_user_repo_perms (cname, repo_name)
sys.exit(0)
else:
#make_wsgi_dir(cname, user_repo_dir)
print "Making repo %s for %s." % (repo_name, user)
print "This repo will appear as %s/users/%s/%s." % (cname, user_repo_dir, repo_name)
print 'If you need a top level repo, please quit now and file a bug for IT to create one for you.'
selection = prompt_user ('Proceed?', ['yes', 'no'])
if (selection == 'yes'):
print 'You can clone an existing public repo or a users private repo.'
print 'You can also create an empty repository.'
selection = prompt_user ('Source repository:', ['Clone a public repository', 'Clone a private repository', 'Create an empty repository'])
if (selection == 'Clone a public repository'):
exec_command = "/usr/bin/find " + doc_root[cname] + " -maxdepth 3 -mindepth 2 -type d -name .hg"
args = shlex.split(exec_command)
#repo_list = run_command (exec_command)
p = Popen(args, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
repo_list = p.communicate()[0].split("\n")
if repo_list:
print "We have the repo_list"
repo_list = map (lambda x: x.replace (doc_root[cname] + '/', ''), repo_list)
repo_list = map (lambda x: x.replace ('/.hg', ''), repo_list)
print 'List of available public repos'
source_repo = prompt_user ('Pick a source repo:', repo_list)
elif (selection == 'Clone a private repository'):
source_user = raw_input ('Please enter the e-mail address of the user owning the repo: ')
valid_user = is_valid_user(source_user)
if valid_user == True:
source_user = source_user.replace ('@', '_')
elif valid_user == False:
sys.stderr.write ('Unknown user.\n')
sys.exit (1)
elif valid_user == 'Invalid Email Address':
sys.stderr.write ('Invalid Email Address.\n')
sys.exit (1)
source_user_path = run_command ('find ' + doc_root[cname] + '/users/' + source_user + ' -maxdepth 1 -mindepth 1 -type d')
if not source_user_path:
print 'That user does not have any private repositories.'
print 'Check https://' + cname + '/users for a list of valid users.'
sys.exit (1)
else:
user_repo_list = run_command ('find ' + doc_root[cname] + '/users/' + source_user + ' -maxdepth 3 -mindepth 2 -type d -name .hg')
user_repo_list = map (lambda x: x.replace (doc_root[cname] + '/users/' + source_user, ''), user_repo_list)
user_repo_list = map (lambda x: x.replace ('/.hg', ''), user_repo_list)
user_repo_list = map (lambda x: x.strip ('/'), user_repo_list)
print 'Select the users repo you wish to clone.'
source_repo = prompt_user ('Pick a source repo:', user_repo_list)
source_repo = 'users/' + source_user + '/' + source_repo
elif (selection == 'Create an empty repository'):
source_repo=''
else:
# We should not get here
source_repo=''
if source_repo != '':
print 'About to clone /%s to /users/%s/%s' % (source_repo, user_repo_dir, repo_name)
response = prompt_user ('Proceed?', ['yes', 'no'])
if (response == 'yes'):
print 'Please do not interrupt this operation.'
run_hg_clone (cname, user_repo_dir, repo_name, source_repo)
else:
print "About to create an empty repository at /users/%s/%s" % (user_repo_dir, repo_name)
response = prompt_user ('Proceed?', ['yes', 'no'])
if (response == 'yes'):
if not os.path.exists ('%s/users/%s' % (doc_root[cname], user_repo_dir)):
try:
exec_command = '/bin/mkdir %s/users/%s' % (doc_root[cname], user_repo_dir)
run_command (exec_command)
except Exception, e:
print "Exception %s" % (e)
run_command ('/usr/bin/nohup /usr/bin/hg init %s/users/%s/%s' % (doc_root[cname], user_repo_dir, repo_name))
fix_user_repo_perms (cname, repo_name)
sys.exit (0)
def edit_repo_description (cname, repo_name):
global doc_root
user = os.getenv ('USER')
user_repo_dir = user.replace ('@', '_')
print 'You are about to edit the description for hg.mozilla.org/users/%s/%s.' % (user_repo_dir, repo_name)
print 'If you need to edit the description for a top level repo, please quit now and file an IT bug for it.'
selection = prompt_user ('Proceed?', ['yes', 'no'])
if (selection == 'yes'):
if os.path.exists ('%s/users/%s/%s' % (doc_root[cname], user_repo_dir, repo_name)):
            repo_description = raw_input ('Enter a one line description for the repository: ')
if (repo_description != ''):
repo_description = escape (repo_description)
repo_config = ConfigParser.RawConfigParser ()
repo_config_file = '%s/users/%s/%s' % (doc_root[cname], user_repo_dir, repo_name) + '/.hg/hgrc'
if not os.path.isfile (repo_config_file):
run_command ('touch ' + repo_config_file)
run_command ('chown ' + user + ':scm_level_1 ' + repo_config_file)
if repo_config.read (repo_config_file):
repo_config_file = open (repo_config_file, 'w+')
else:
sys.stderr.write ('Could not read the hgrc file for /users/%s/%s.\n' % (user_repo_dir, repo_name))
sys.stderr.write ('Please file an IT bug to troubleshoot this.')
sys.exit (1)
if not repo_config.has_section ('web'):
repo_config.add_section ('web')
repo_config.set ('web', 'description', repo_description)
repo_config.write (repo_config_file)
repo_config_file.close ()
else:
sys.stderr.write ('Could not find the repository at /users/%s/%s.\n' % (user_repo_dir, repo_name))
sys.exit (1)
def do_delete(cname, repo_dir, repo_name, verbose=False):
global doc_root
if verbose:
print "Deleting..."
run_command ('rm -rf %s/users/%s/%s' % (doc_root[cname], repo_dir, repo_name))
if verbose:
print "Finished deleting"
purge_log = open ('/tmp/pushlog_purge.%s' % os.getpid(), "a")
purge_log.write ('echo users/%s/%s\n' % (repo_dir, repo_name))
purge_log.close()
def delete_repo (cname, repo_name, do_quick_delete, verbose=False):
global doc_root
user = os.getenv ('USER')
if(user in verbose_users):
verbose = True
user_repo_dir = user.replace ('@', '_')
url_path = "/users/%s" % user_repo_dir
if os.path.exists ('%s/users/%s/%s' % (doc_root[cname], user_repo_dir, repo_name)):
if do_quick_delete:
do_delete (cname, user_repo_dir, repo_name, verbose)
else:
print '\nAre you sure you want to delete /users/%s/%s?' % (user_repo_dir, repo_name)
print '\nThis action is IRREVERSIBLE.'
selection = prompt_user ('Proceed?', ['yes', 'no'])
if (selection == 'yes'):
do_delete (cname, user_repo_dir, repo_name, verbose)
else:
sys.stderr.write ('Could not find the repository at /users/%s/%s.\n' % (user_repo_dir, repo_name))
sys.stderr.write ('Please check the list at https://%s/users/%s\n' % (cname, user_repo_dir))
sys.exit (1)
sys.exit(0)
def edit_repo (cname, repo_name, do_quick_delete):
if do_quick_delete:
delete_repo (cname, repo_name, do_quick_delete)
else:
action = prompt_user ('What would you like to do?', ['Delete the repository', 'Edit the description'])
if action == 'Edit the description':
edit_repo_description (cname, repo_name)
elif action == 'Delete the repository':
delete_repo (cname, repo_name, False)
return
def serve (cname):
global doc_root
ssh_command = os.getenv ('SSH_ORIGINAL_COMMAND')
if not ssh_command:
sys.stderr.write ('No interactive shells allowed here!\n')
sys.exit (1)
elif ssh_command.startswith ('hg'):
repo_expr = re.compile ('(.*)\s+-R\s+([^\s]+\s+)(.*)')
if (repo_expr.search (ssh_command)):
[(hg_path, repo_path, hg_command)] = repo_expr.findall (ssh_command)
if (hg_command == 'serve --stdio') and (check_repo_name (repo_path)):
hg_arg_string = '/usr/bin/hg -R ' + doc_root[cname] + '/' + repo_path + hg_command
hg_args = hg_arg_string.split ()
os.execv ('/usr/bin/hg', hg_args)
else:
sys.stderr.write ("Thank you dchen! but.. I don't think so!\n")
sys.exit (1)
elif ssh_command.startswith ('clone ') and (cname != 'hg.ecmascript.org'):
args = ssh_command.replace ('clone', '').split()
if check_repo_name (args[0]):
if len(args) == 1:
make_repo_clone (cname, args[0], None)
elif len(args) == 2:
if os.path.isdir ('%s/%s/.hg' % (doc_root[cname], args[1])):
make_repo_clone (cname, args[0], args[1])
sys.exit (0)
sys.stderr.write ('clone usage: ssh hg.mozilla.org clone newrepo [srcrepo]\n')
sys.exit (1)
elif ssh_command.startswith ('edit ') and (cname != 'hg.ecmascript.org'):
args = ssh_command.replace ('edit', '', 1).split()
if check_repo_name (args[0]):
if len(args) == 1:
edit_repo (cname, args[0], False)
elif len(args) == 3 and args[1] == 'delete' and args[2] == 'YES':
edit_repo (cname, args[0], True)
else:
sys.stderr.write ('edit usage: ssh hg.mozilla.org edit [userrepo delete] - WARNING: will not prompt!\n')
sys.exit (1)
elif ssh_command.startswith ('pushlog ') and (cname != 'hg.ecmascript.org'):
args = ssh_command.replace ('pushlog', '').split()
if check_repo_name (args[0]):
fh = open("/repo_local/mozilla/%s/.hg/pushlog2.db" % (args[0]))
print(fh.read())
fh.close()
else:
sys.stderr.write ('No interactive commands allowed here!\n')
sys.exit (1)
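# Illustrative invocations of this SSH command interface (a sketch based on the
# usage messages above):
#   ssh hg.mozilla.org clone my-new-repo mozilla-central
#   ssh hg.mozilla.org edit my-new-repo delete YES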
if __name__ == '__main__':
# if is_valid_user (os.getenv ('USER')):
if is_valid_user ('bkero@mozilla.com'):
serve ("hg.mozilla.org")
else:
sys.stderr.write ('You are not welcome here, go away!\n')
sys.exit (1)
|
|
"""Support for Harmony Hub devices."""
import json
import logging
import voluptuous as vol
from homeassistant.components import remote
from homeassistant.components.remote import (
ATTR_ACTIVITY,
ATTR_DELAY_SECS,
ATTR_DEVICE,
ATTR_HOLD_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
SUPPORT_ACTIVITY,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ACTIVITY_POWER_OFF,
ATTR_ACTIVITY_STARTING,
ATTR_DEVICES_LIST,
ATTR_LAST_ACTIVITY,
DOMAIN,
HARMONY_DATA,
HARMONY_OPTIONS_UPDATE,
PREVIOUS_ACTIVE_ACTIVITY,
SERVICE_CHANGE_CHANNEL,
SERVICE_SYNC,
)
from .entity import HarmonyEntity
from .subscriber import HarmonyCallback
_LOGGER = logging.getLogger(__name__)
# We want to fire remote commands right away
PARALLEL_UPDATES = 0
ATTR_CHANNEL = "channel"
HARMONY_CHANGE_CHANNEL_SCHEMA = {
vol.Required(ATTR_CHANNEL): cv.positive_int,
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up the Harmony config entry."""
data = hass.data[DOMAIN][entry.entry_id][HARMONY_DATA]
_LOGGER.debug("HarmonyData : %s", data)
default_activity = entry.options.get(ATTR_ACTIVITY)
delay_secs = entry.options.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
harmony_conf_file = hass.config.path(f"harmony_{entry.unique_id}.conf")
device = HarmonyRemote(data, default_activity, delay_secs, harmony_conf_file)
async_add_entities([device])
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SYNC,
{},
"sync",
)
platform.async_register_entity_service(
SERVICE_CHANGE_CHANNEL, HARMONY_CHANGE_CHANNEL_SCHEMA, "change_channel"
)
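# Illustrative YAML service call from a Home Assistant automation (the entity
# id below is an example):
#   service: harmony.change_channel
#   target:
#     entity_id: remote.living_room_hub
#   data:
#     channel: 200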
class HarmonyRemote(HarmonyEntity, remote.RemoteEntity, RestoreEntity):
"""Remote representation used to control a Harmony device."""
def __init__(self, data, activity, delay_secs, out_path):
"""Initialize HarmonyRemote class."""
super().__init__(data=data)
self._state = None
self._current_activity = ACTIVITY_POWER_OFF
self.default_activity = activity
self._activity_starting = None
self._is_initial_update = True
self.delay_secs = delay_secs
self._last_activity = None
self._config_path = out_path
self._attr_unique_id = data.unique_id
self._attr_device_info = self._data.device_info(DOMAIN)
self._attr_name = data.name
self._attr_supported_features = SUPPORT_ACTIVITY
async def _async_update_options(self, data):
"""Change options when the options flow does."""
if ATTR_DELAY_SECS in data:
self.delay_secs = data[ATTR_DELAY_SECS]
if ATTR_ACTIVITY in data:
self.default_activity = data[ATTR_ACTIVITY]
def _setup_callbacks(self):
callbacks = {
"connected": self.async_got_connected,
"disconnected": self.async_got_disconnected,
"config_updated": self.async_new_config,
"activity_starting": self.async_new_activity,
"activity_started": self.async_new_activity_finished,
}
self.async_on_remove(self._data.async_subscribe(HarmonyCallback(**callbacks)))
@callback
def async_new_activity_finished(self, activity_info: tuple) -> None:
"""Call for finished updated current activity."""
self._activity_starting = None
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Complete the initialization."""
await super().async_added_to_hass()
_LOGGER.debug("%s: Harmony Hub added", self.name)
self.async_on_remove(self._clear_disconnection_delay)
self._setup_callbacks()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{HARMONY_OPTIONS_UPDATE}-{self.unique_id}",
self._async_update_options,
)
)
        # Store the Harmony hub config; this will also update our current
        # activity
await self.async_new_config()
        # Restore the last activity so we know
        # what to turn on if nothing is specified
last_state = await self.async_get_last_state()
if not last_state:
return
if ATTR_LAST_ACTIVITY not in last_state.attributes:
return
if self.is_on:
return
self._last_activity = last_state.attributes[ATTR_LAST_ACTIVITY]
@property
def current_activity(self):
"""Return the current activity."""
return self._current_activity
@property
def activity_list(self):
"""Return the available activities."""
return self._data.activity_names
@property
def extra_state_attributes(self):
"""Add platform specific attributes."""
return {
ATTR_ACTIVITY_STARTING: self._activity_starting,
ATTR_DEVICES_LIST: self._data.device_names,
ATTR_LAST_ACTIVITY: self._last_activity,
}
@property
def is_on(self):
"""Return False if PowerOff is the current activity, otherwise True."""
return self._current_activity not in [None, "PowerOff"]
@callback
def async_new_activity(self, activity_info: tuple) -> None:
"""Call for updating the current activity."""
activity_id, activity_name = activity_info
_LOGGER.debug("%s: activity reported as: %s", self.name, activity_name)
self._current_activity = activity_name
if self._is_initial_update:
self._is_initial_update = False
else:
self._activity_starting = activity_name
if activity_id != -1:
# Save the activity so we can restore
# to that activity if none is specified
# when turning on
self._last_activity = activity_name
self._state = bool(activity_id != -1)
self.async_write_ha_state()
async def async_new_config(self, _=None):
"""Call for updating the current activity."""
_LOGGER.debug("%s: configuration has been updated", self.name)
self.async_new_activity(self._data.current_activity)
await self.hass.async_add_executor_job(self.write_config_file)
async def async_turn_on(self, **kwargs):
"""Start an activity from the Harmony device."""
_LOGGER.debug("%s: Turn On", self.name)
activity = kwargs.get(ATTR_ACTIVITY, self.default_activity)
if not activity or activity == PREVIOUS_ACTIVE_ACTIVITY:
if self._last_activity:
activity = self._last_activity
else:
all_activities = self._data.activity_names
if all_activities:
activity = all_activities[0]
if activity:
await self._data.async_start_activity(activity)
else:
_LOGGER.error("%s: No activity specified with turn_on service", self.name)
async def async_turn_off(self, **kwargs):
"""Start the PowerOff activity."""
await self._data.async_power_off()
async def async_send_command(self, command, **kwargs):
"""Send a list of commands to one device."""
_LOGGER.debug("%s: Send Command", self.name)
device = kwargs.get(ATTR_DEVICE)
if device is None:
_LOGGER.error("%s: Missing required argument: device", self.name)
return
num_repeats = kwargs[ATTR_NUM_REPEATS]
delay_secs = kwargs.get(ATTR_DELAY_SECS, self.delay_secs)
hold_secs = kwargs[ATTR_HOLD_SECS]
await self._data.async_send_command(
command, device, num_repeats, delay_secs, hold_secs
)
async def change_channel(self, channel):
"""Change the channel using Harmony remote."""
await self._data.change_channel(channel)
async def sync(self):
"""Sync the Harmony device with the web service."""
if await self._data.sync():
await self.hass.async_add_executor_job(self.write_config_file)
def write_config_file(self):
"""Write Harmony configuration file.
This is a handy way for users to figure out the available commands for automations.
"""
_LOGGER.debug(
"%s: Writing hub configuration to file: %s", self.name, self._config_path
)
json_config = self._data.json_config
if json_config is None:
_LOGGER.warning("%s: No configuration received from hub", self.name)
return
try:
with open(self._config_path, "w+", encoding="utf-8") as file_out:
json.dump(json_config, file_out, sort_keys=True, indent=4)
except OSError as exc:
_LOGGER.error(
"%s: Unable to write HUB configuration to %s: %s",
self.name,
self._config_path,
exc,
)
|
|
from __future__ import absolute_import, division, print_function
import socket
import random
import docker
import docker.errors
import logging
import os
import tempfile
from ..container_manager import (
create_model_container_label, parse_model_container_label,
ContainerManager, CLIPPER_DOCKER_LABEL, CLIPPER_MODEL_CONTAINER_LABEL,
CLIPPER_QUERY_FRONTEND_CONTAINER_LABEL,
CLIPPER_MGMT_FRONTEND_CONTAINER_LABEL, CLIPPER_INTERNAL_RPC_PORT,
CLIPPER_INTERNAL_MANAGEMENT_PORT, CLIPPER_INTERNAL_QUERY_PORT,
CLIPPER_INTERNAL_METRIC_PORT, CLIPPER_INTERNAL_REDIS_PORT,
CLIPPER_DOCKER_PORT_LABELS, CLIPPER_METRIC_CONFIG_LABEL, ClusterAdapter,
CLIPPER_FLUENTD_CONFIG_LABEL, CLIPPER_INTERNAL_FLUENTD_PORT)
from ..exceptions import ClipperException
from .docker_metric_utils import (
run_query_frontend_metric_image,
setup_metric_config,
run_metric_image,
add_to_metric_config,
delete_from_metric_config
)
from .logging.docker_logging_utils import (
get_logs_from_containers,
get_default_log_config
)
from .docker_api_utils import (
create_network,
check_container_status,
list_containers,
run_container
)
from clipper_admin.docker.logging.fluentd import Fluentd
logger = logging.getLogger(__name__)
class DockerContainerManager(ContainerManager):
def __init__(self,
cluster_name="default-cluster",
docker_ip_address="localhost",
use_centralized_log=False,
fluentd_port=CLIPPER_INTERNAL_FLUENTD_PORT,
clipper_query_port=1337,
clipper_management_port=1338,
clipper_rpc_port=7000,
redis_ip=None,
redis_port=6379,
prometheus_port=9090,
docker_network="clipper_network",
extra_container_kwargs=None):
"""
Parameters
----------
cluster_name : str
A unique name for this Clipper cluster. This can be used to run multiple Clipper
clusters on the same node without interfering with each other.
docker_ip_address : str, optional
The public hostname or IP address at which the Clipper Docker
containers can be accessed via their exposed ports. This should almost always
be "localhost". Only change if you know what you're doing!
        use_centralized_log : bool, optional
            If True, Clipper sets up Fluentd and a DB (currently SQLite) to centralize logs.
fluentd_port : int, optional
The port on which the fluentd logging driver should listen to centralize logs.
clipper_query_port : int, optional
The port on which the query frontend should listen for incoming prediction requests.
clipper_management_port : int, optional
The port on which the management frontend should expose the management REST API.
clipper_rpc_port : int, optional
The port to start the Clipper RPC service on.
redis_ip : str, optional
The address of a running Redis cluster. If set to None, Clipper will start
a Redis container for you.
redis_port : int, optional
The Redis port. If ``redis_ip`` is set to None, Clipper will start Redis on this port.
If ``redis_ip`` is provided, Clipper will connect to Redis on this port.
docker_network : str, optional
The docker network to attach the containers to. You can read more about Docker
networking in the
`Docker User Guide <https://docs.docker.com/engine/userguide/networking/>`_.
extra_container_kwargs : dict
Any additional keyword arguments to pass to the call to
:py:meth:`docker.client.containers.run`.
"""
self.cluster_name = cluster_name
self.cluster_identifier = cluster_name # For logging purpose
self.public_hostname = docker_ip_address
self.clipper_query_port = clipper_query_port
self.clipper_management_port = clipper_management_port
self.clipper_rpc_port = clipper_rpc_port
self.redis_ip = redis_ip
if redis_ip is None:
self.external_redis = False
else:
self.external_redis = True
self.redis_port = redis_port
self.prometheus_port = prometheus_port
self.centralize_log = use_centralized_log
        if docker_network == "host":
raise ClipperException(
"DockerContainerManager does not support running Clipper on the "
"\"host\" docker network. Please pick a different network name"
)
self.docker_network = docker_network
self.docker_client = docker.from_env()
if extra_container_kwargs is None:
self.extra_container_kwargs = {}
else:
self.extra_container_kwargs = extra_container_kwargs.copy()
# Merge Clipper-specific labels with any user-provided labels
if "labels" in self.extra_container_kwargs:
self.common_labels = self.extra_container_kwargs.pop("labels")
self.common_labels.update({
CLIPPER_DOCKER_LABEL: self.cluster_name
})
else:
self.common_labels = {CLIPPER_DOCKER_LABEL: self.cluster_name}
container_args = {
"network": self.docker_network,
"detach": True,
}
self.extra_container_kwargs.update(container_args)
self.logger = ClusterAdapter(logger, {
'cluster_name': self.cluster_identifier
})
# Setting Docker cluster logging.
self.logging_system = Fluentd
self.log_config = get_default_log_config()
self.logging_system_instance = None
if self.centralize_log:
self.logging_system_instance = self.logging_system(
self.logger,
self.cluster_name,
self.docker_client,
port=find_unbound_port(fluentd_port)
)
self.log_config = self.logging_system_instance.get_log_config()
def start_clipper(self,
query_frontend_image,
mgmt_frontend_image,
frontend_exporter_image,
cache_size,
qf_http_thread_pool_size,
qf_http_timeout_request,
qf_http_timeout_content,
num_frontend_replicas=1):
if num_frontend_replicas != 1:
msg = "Docker container manager's query frontend scale-out " \
"hasn't been implemented. You can contribute to Clipper at " \
"https://github.com/ucbrise/clipper." \
"Please set num_frontend_replicas=1 or use Kubernetes."
raise ClipperException(msg)
create_network(
docker_client=self.docker_client,
name=self.docker_network)
containers_in_cluster = list_containers(
docker_client=self.docker_client,
filters={
'label': [
'{key}={val}'.format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name)
]
})
if len(containers_in_cluster) > 0:
raise ClipperException(
"Cluster {} cannot be started because it already exists. "
"Please use ClipperConnection.connect() to connect to it.".
format(self.cluster_name))
if self.centralize_log:
self.logging_system_instance.start(self.common_labels, self.extra_container_kwargs)
# Redis for cluster configuration
if not self.external_redis:
self.logger.info("Starting managed Redis instance in Docker")
redis_name = "redis-{}".format(random.randint(0, 100000))
self.redis_port = find_unbound_port(self.redis_port)
redis_labels = self.common_labels.copy()
redis_labels[CLIPPER_DOCKER_PORT_LABELS['redis']] = str(
self.redis_port)
redis_container = run_container(
docker_client=self.docker_client,
image='redis:alpine',
cmd="redis-server --port %s" % CLIPPER_INTERNAL_REDIS_PORT,
log_config=self.log_config,
name=redis_name,
ports={
'%s/tcp' % CLIPPER_INTERNAL_REDIS_PORT: self.redis_port
},
labels=redis_labels,
extra_container_kwargs=self.extra_container_kwargs)
self.redis_ip = redis_container.name
check_container_status(
docker_client=self.docker_client,
name=redis_name)
# frontend management
mgmt_cmd = "--redis_ip={redis_ip} --redis_port={redis_port}".format(
redis_ip=self.redis_ip, redis_port=CLIPPER_INTERNAL_REDIS_PORT)
mgmt_name = "mgmt_frontend-{}".format(random.randint(0, 100000))
self.clipper_management_port = find_unbound_port(
self.clipper_management_port)
mgmt_labels = self.common_labels.copy()
mgmt_labels[CLIPPER_MGMT_FRONTEND_CONTAINER_LABEL] = ""
mgmt_labels[CLIPPER_DOCKER_PORT_LABELS['management']] = str(
self.clipper_management_port)
run_container(
docker_client=self.docker_client,
image=mgmt_frontend_image,
cmd=mgmt_cmd,
log_config=self.log_config,
name=mgmt_name,
ports={
'%s/tcp' % CLIPPER_INTERNAL_MANAGEMENT_PORT:
self.clipper_management_port
},
labels=mgmt_labels,
extra_container_kwargs=self.extra_container_kwargs)
check_container_status(
docker_client=self.docker_client,
name=mgmt_name)
# query frontend
query_cmd = ("--redis_ip={redis_ip} --redis_port={redis_port} "
"--prediction_cache_size={cache_size} "
"--thread_pool_size={thread_pool_size} "
"--timeout_request={timeout_request} "
"--timeout_content={timeout_content}").format(
redis_ip=self.redis_ip,
redis_port=CLIPPER_INTERNAL_REDIS_PORT,
cache_size=cache_size,
thread_pool_size=qf_http_thread_pool_size,
timeout_request=qf_http_timeout_request,
timeout_content=qf_http_timeout_content)
query_container_id = random.randint(0, 100000)
query_name = "query_frontend-{}".format(query_container_id)
self.clipper_query_port = find_unbound_port(self.clipper_query_port)
self.clipper_rpc_port = find_unbound_port(self.clipper_rpc_port)
query_labels = self.common_labels.copy()
query_labels[CLIPPER_QUERY_FRONTEND_CONTAINER_LABEL] = ""
query_labels[CLIPPER_DOCKER_PORT_LABELS['query_rest']] = str(
self.clipper_query_port)
query_labels[CLIPPER_DOCKER_PORT_LABELS['query_rpc']] = str(
self.clipper_rpc_port)
run_container(
docker_client=self.docker_client,
image=query_frontend_image,
cmd=query_cmd,
log_config=self.log_config,
name=query_name,
ports={
'%s/tcp' % CLIPPER_INTERNAL_QUERY_PORT: self.clipper_query_port,
'%s/tcp' % CLIPPER_INTERNAL_RPC_PORT: self.clipper_rpc_port
},
labels=query_labels,
extra_container_kwargs=self.extra_container_kwargs)
check_container_status(
docker_client=self.docker_client,
name=query_name)
# Metric Section
query_frontend_metric_name = "query_frontend_exporter-{}".format(
query_container_id)
run_query_frontend_metric_image(
query_frontend_metric_name, self.docker_client, query_name,
frontend_exporter_image, self.common_labels,
self.log_config, self.extra_container_kwargs)
check_container_status(
docker_client=self.docker_client,
name=query_frontend_metric_name)
self.prom_config_path = tempfile.NamedTemporaryFile(
'w', suffix='.yml', delete=False).name
self.prom_config_path = os.path.realpath(
self.prom_config_path) # resolve symlink
self.logger.info("Metric Configuration Saved at {path}".format(
path=self.prom_config_path))
setup_metric_config(query_frontend_metric_name, self.prom_config_path,
CLIPPER_INTERNAL_METRIC_PORT)
metric_frontend_name = "metric_frontend-{}".format(
random.randint(0, 100000))
self.prometheus_port = find_unbound_port(self.prometheus_port)
metric_labels = self.common_labels.copy()
metric_labels[CLIPPER_DOCKER_PORT_LABELS['metric']] = str(
self.prometheus_port)
metric_labels[CLIPPER_METRIC_CONFIG_LABEL] = self.prom_config_path
run_metric_image(metric_frontend_name, self.docker_client,
metric_labels, self.prometheus_port,
self.prom_config_path, self.log_config,
self.extra_container_kwargs)
check_container_status(
docker_client=self.docker_client,
name=metric_frontend_name)
self.connect()
def connect(self):
"""
        Use the cluster name to look up the cluster's containers and refresh the
        port attributes, since start_clipper may have bound them to different
        host ports than the ones originally requested.
:return: None
"""
containers = list_containers(
docker_client=self.docker_client,
filters={
'label': [
'{key}={val}'.format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name)
]
})
all_labels = {}
for container in containers:
all_labels.update(container.labels)
if CLIPPER_DOCKER_PORT_LABELS['redis'] in all_labels:
self.redis_port = all_labels[CLIPPER_DOCKER_PORT_LABELS['redis']]
self.clipper_management_port = all_labels[CLIPPER_DOCKER_PORT_LABELS[
'management']]
self.clipper_query_port = all_labels[CLIPPER_DOCKER_PORT_LABELS[
'query_rest']]
self.clipper_rpc_port = all_labels[CLIPPER_DOCKER_PORT_LABELS[
'query_rpc']]
self.prometheus_port = all_labels[CLIPPER_DOCKER_PORT_LABELS['metric']]
self.prom_config_path = all_labels[CLIPPER_METRIC_CONFIG_LABEL]
if self._is_valid_logging_state_to_connect(all_labels):
self.connect_to_logging_system(all_labels)
def deploy_model(self, name, version, input_type, image, num_replicas=1):
# Parameters
# ----------
# image : str
        # The fully specified Docker image to deploy. If using a custom
# registry, the registry name must be prepended to the image. For example,
# "localhost:5000/my_model_name:my_model_version" or
# "quay.io/my_namespace/my_model_name:my_model_version"
self.set_num_replicas(name, version, input_type, image, num_replicas)
def _get_replicas(self, name, version):
containers = list_containers(
docker_client=self.docker_client,
filters={
"label": [
"{key}={val}".format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name),
"{key}={val}".format(
key=CLIPPER_MODEL_CONTAINER_LABEL,
val=create_model_container_label(name, version))
]
})
return containers
def get_num_replicas(self, name, version):
return len(self._get_replicas(name, version))
def _add_replica(self, name, version, input_type, image):
containers = list_containers(
docker_client=self.docker_client,
filters={
"label": [
"{key}={val}".format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name),
CLIPPER_QUERY_FRONTEND_CONTAINER_LABEL
]
})
if len(containers) < 1:
self.logger.warning("No Clipper query frontend found.")
raise ClipperException(
"No Clipper query frontend to attach model container to")
query_frontend_hostname = containers[0].name
env_vars = {
"CLIPPER_MODEL_NAME": name,
"CLIPPER_MODEL_VERSION": version,
            # NOTE: assumes this container is launched on the same machine and
            # in the same docker network as the query frontend
"CLIPPER_IP": query_frontend_hostname,
"CLIPPER_INPUT_TYPE": input_type,
}
model_container_label = create_model_container_label(name, version)
labels = self.common_labels.copy()
labels[CLIPPER_MODEL_CONTAINER_LABEL] = model_container_label
labels[CLIPPER_DOCKER_LABEL] = self.cluster_name
model_container_name = model_container_label + '-{}'.format(
random.randint(0, 100000))
run_container(
docker_client=self.docker_client,
image=image,
name=model_container_name,
environment=env_vars,
labels=labels,
log_config=self.log_config,
extra_container_kwargs=self.extra_container_kwargs)
# Metric Section
add_to_metric_config(model_container_name, self.prom_config_path,
self.prometheus_port,
CLIPPER_INTERNAL_METRIC_PORT)
# Return model_container_name so we can check if it's up and running later
return model_container_name
def set_num_replicas(self, name, version, input_type, image, num_replicas):
current_replicas = self._get_replicas(name, version)
if len(current_replicas) < num_replicas:
num_missing = num_replicas - len(current_replicas)
self.logger.info(
"Found {cur} replicas for {name}:{version}. Adding {missing}".
format(
cur=len(current_replicas),
name=name,
version=version,
missing=(num_missing)))
model_container_names = []
for _ in range(num_missing):
container_name = self._add_replica(name, version, input_type,
image)
model_container_names.append(container_name)
for name in model_container_names:
check_container_status(
docker_client=self.docker_client,
name=name)
elif len(current_replicas) > num_replicas:
num_extra = len(current_replicas) - num_replicas
self.logger.info(
"Found {cur} replicas for {name}:{version}. Removing {extra}".
format(
cur=len(current_replicas),
name=name,
version=version,
extra=(num_extra)))
while len(current_replicas) > num_replicas:
cur_container = current_replicas.pop()
cur_container.stop()
# Metric Section
delete_from_metric_config(cur_container.name,
self.prom_config_path,
self.prometheus_port)
def get_logs(self, logging_dir):
if self.centralize_log:
return self.logging_system_instance.get_logs(logging_dir)
else:
return get_logs_from_containers(self, logging_dir)
def stop_models(self, models):
containers = list_containers(
docker_client=self.docker_client,
filters={
"label": [
CLIPPER_MODEL_CONTAINER_LABEL, "{key}={val}".format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name)
]
})
for c in containers:
c_name, c_version = parse_model_container_label(
c.labels[CLIPPER_MODEL_CONTAINER_LABEL])
if c_name in models and c_version in models[c_name]:
c.stop()
def stop_all_model_containers(self):
containers = list_containers(
docker_client=self.docker_client,
filters={
"label": [
CLIPPER_MODEL_CONTAINER_LABEL, "{key}={val}".format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name)
]
})
for c in containers:
c.stop()
def stop_all(self, graceful=True):
containers = list_containers(
docker_client=self.docker_client,
filters={
"label":
"{key}={val}".format(
key=CLIPPER_DOCKER_LABEL, val=self.cluster_name)
})
for c in containers:
if graceful:
c.stop()
else:
c.kill()
def _is_valid_logging_state_to_connect(self, all_labels):
if self.centralize_log and not self.logging_system.container_is_running(all_labels):
raise ClipperException(
"Invalid state detected. "
"log centralization is {log_centralization_state}, "
"but cannot find fluentd instance running. "
"Please change your use_centralized_log parameter of DockerContainermanager"
.format(log_centralization_state=self.centralize_log)
)
return self.logging_system.container_is_running(all_labels)
def connect_to_logging_system(self, all_labels):
if not self.centralize_log:
logger.info(
"The current DockerContainerManager's use_centralized_log flag is False, "
"but there is a logging system {type} instance running in a cluster."
"It means that clipper cluster you want to connect uses log centralization."
"We will set the flag on to avoid unexpected bugs"
.format(type=self.logging_system.get_type())
)
self.centralize_log= True
self.logging_system_instance = \
self.logging_system(
self.logger,
self.cluster_name,
self.docker_client,
port=all_labels[CLIPPER_DOCKER_PORT_LABELS['fluentd']],
conf_path=all_labels[CLIPPER_FLUENTD_CONFIG_LABEL]
)
self.log_config = self.logging_system_instance.get_log_config()
def get_admin_addr(self):
return "{host}:{port}".format(
host=self.public_hostname, port=self.clipper_management_port)
def get_query_addr(self):
return "{host}:{port}".format(
host=self.public_hostname, port=self.clipper_query_port)
def get_metric_addr(self):
return "{host}:{port}".format(
host=self.public_hostname, port=self.prometheus_port)
def find_unbound_port(start=None,
increment=False,
port_range=(10000, 50000),
verbose=False,
logger=None):
"""
    Find an unbound port.
Parameters
----------
start : int
        The port number to start with. If this port is unbound, return it.
If None, start will be a random port.
increment : bool
If True, find port by incrementing start port; else, random search.
port_range : tuple
The range of port for random number generation
verbose : bool
Verbose flag for logging
logger: logging.Logger
"""
while True:
if not start:
start = random.randint(*port_range)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(("127.0.0.1", start))
            # Close the socket so the port is free for the caller to bind
            sock.close()
return start
except socket.error as e:
if verbose and logger:
logger.info("Socket error: {}".format(e))
logger.info(
"randomly generated port %d is bound. Trying again." %
start)
if increment:
start += 1
else:
start = random.randint(*port_range)
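# Example usage (illustrative): find a free port at or above 7000 by scanning
# upward instead of picking randomly:
#   port = find_unbound_port(start=7000, increment=True)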
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import fnmatch
from helpers import unittest, LuigiTestCase
import luigi
import mock
from luigi.mock import MockTarget, MockFileSystem
from luigi.tools.range import (RangeDaily, RangeDailyBase, RangeEvent, RangeHourly, RangeHourlyBase, _constrain_glob,
_get_filesystems_and_globs)
class CommonDateHourTask(luigi.Task):
dh = luigi.DateHourParameter()
def output(self):
return MockTarget(self.dh.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm%Hdara21/ooo'))
class CommonDateTask(luigi.Task):
d = luigi.DateParameter()
def output(self):
return MockTarget(self.d.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm01dara21/ooo'))
task_a_paths = [
'TaskA/2014-03-20/18',
'TaskA/2014-03-20/21',
'TaskA/2014-03-20/23',
'TaskA/2014-03-21/00',
'TaskA/2014-03-21/00.attempt.1',
'TaskA/2014-03-21/00.attempt.2',
'TaskA/2014-03-21/01',
'TaskA/2014-03-21/02',
'TaskA/2014-03-21/03.attempt-temp-2014-03-21T13-22-58.165969',
'TaskA/2014-03-21/03.attempt.1',
'TaskA/2014-03-21/03.attempt.2',
'TaskA/2014-03-21/03.attempt.3',
'TaskA/2014-03-21/03.attempt.latest',
'TaskA/2014-03-21/04.attempt-temp-2014-03-21T13-23-09.078249',
'TaskA/2014-03-21/12',
'TaskA/2014-03-23/12',
]
task_b_paths = [
'TaskB/no/worries2014-03-20/23',
'TaskB/no/worries2014-03-21/01',
'TaskB/no/worries2014-03-21/03',
'TaskB/no/worries2014-03-21/04.attempt-yadayada',
'TaskB/no/worries2014-03-21/05',
]
mock_contents = task_a_paths + task_b_paths
expected_a = [
'TaskA(dh=2014-03-20T17)',
'TaskA(dh=2014-03-20T19)',
'TaskA(dh=2014-03-20T20)',
]
# expected_reverse = [
# ]
expected_wrapper = [
'CommonWrapperTask(dh=2014-03-21T00)',
'CommonWrapperTask(dh=2014-03-21T02)',
'CommonWrapperTask(dh=2014-03-21T03)',
'CommonWrapperTask(dh=2014-03-21T04)',
'CommonWrapperTask(dh=2014-03-21T05)',
]
class TaskA(luigi.Task):
dh = luigi.DateHourParameter()
def output(self):
return MockTarget(self.dh.strftime('TaskA/%Y-%m-%d/%H'))
class TaskB(luigi.Task):
dh = luigi.DateHourParameter()
complicator = luigi.Parameter()
def output(self):
return MockTarget(self.dh.strftime('TaskB/%%s%Y-%m-%d/%H') % self.complicator)
class TaskC(luigi.Task):
dh = luigi.DateHourParameter()
def output(self):
return MockTarget(self.dh.strftime('not/a/real/path/%Y-%m-%d/%H'))
class CommonWrapperTask(luigi.WrapperTask):
dh = luigi.DateHourParameter()
def requires(self):
yield TaskA(dh=self.dh)
yield TaskB(dh=self.dh, complicator='no/worries') # str(self.dh) would complicate beyond working
def mock_listdir(contents):
def contents_listdir(_, glob):
for path in fnmatch.filter(contents, glob + '*'):
yield path
return contents_listdir
def mock_exists_always_true(_, _2):
yield True
def mock_exists_always_false(_, _2):
yield False
class ConstrainGlobTest(unittest.TestCase):
def test_limit(self):
glob = '/[0-9][0-9][0-9][0-9]/[0-9][0-9]/[0-9][0-9]/[0-9][0-9]'
paths = [(datetime.datetime(2013, 12, 31, 5) + datetime.timedelta(hours=h)).strftime('/%Y/%m/%d/%H') for h in range(40)]
self.assertEqual(sorted(_constrain_glob(glob, paths)), [
'/2013/12/31/[0-2][0-9]',
'/2014/01/01/[0-2][0-9]',
])
paths.pop(26)
self.assertEqual(sorted(_constrain_glob(glob, paths, 6)), [
'/2013/12/31/0[5-9]',
'/2013/12/31/1[0-9]',
'/2013/12/31/2[0-3]',
'/2014/01/01/0[012345689]',
'/2014/01/01/1[0-9]',
'/2014/01/01/2[0]',
])
self.assertEqual(sorted(_constrain_glob(glob, paths[:7], 10)), [
'/2013/12/31/05',
'/2013/12/31/06',
'/2013/12/31/07',
'/2013/12/31/08',
'/2013/12/31/09',
'/2013/12/31/10',
'/2013/12/31/11',
])
def test_no_wildcards(self):
glob = '/2014/01'
paths = '/2014/01'
self.assertEqual(_constrain_glob(glob, paths), [
'/2014/01',
])
def datetime_to_epoch(dt):
td = dt - datetime.datetime(1970, 1, 1)
return td.days * 86400 + td.seconds + td.microseconds / 1E6
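# e.g. datetime_to_epoch(datetime.datetime(1970, 1, 2)) == 86400.0, i.e. one
# day after the Unix epoch expressed in seconds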
class RangeDailyBaseTest(unittest.TestCase):
maxDiff = None
def setUp(self):
# yucky to create separate callbacks; would be nicer if the callback
# received an instance of a subclass of Event, so one callback could
# accumulate all types
@RangeDailyBase.event_handler(RangeEvent.DELAY)
def callback_delay(*args):
self.events.setdefault(RangeEvent.DELAY, []).append(args)
@RangeDailyBase.event_handler(RangeEvent.COMPLETE_COUNT)
def callback_complete_count(*args):
self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args)
@RangeDailyBase.event_handler(RangeEvent.COMPLETE_FRACTION)
def callback_complete_fraction(*args):
self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args)
self.events = {}
def test_consistent_formatting(self):
task = RangeDailyBase(of=CommonDateTask,
start=datetime.date(2016, 1, 1))
self.assertEqual(task._format_range([datetime.datetime(2016, 1, 2, 13), datetime.datetime(2016, 2, 29, 23)]), '[2016-01-02, 2016-02-29]')
def _empty_subcase(self, kwargs, expected_events):
calls = []
class RangeDailyDerived(RangeDailyBase):
def missing_datetimes(*args):
calls.append(args)
return args[-1][:5]
task = RangeDailyDerived(of=CommonDateTask,
**kwargs)
self.assertEqual(task.requires(), [])
self.assertEqual(calls, [])
self.assertEqual(task.requires(), [])
self.assertEqual(calls, []) # subsequent requires() should return the cached result, never call missing_datetimes
self.assertEqual(self.events, expected_events)
self.assertTrue(task.complete())
def test_stop_before_days_back(self):
# nothing to do because stop is earlier
self._empty_subcase(
{
'now': datetime_to_epoch(datetime.datetime(2015, 1, 1, 4)),
'stop': datetime.date(2014, 3, 20),
'days_back': 4,
'days_forward': 20,
'reverse': True,
},
{
'event.tools.range.delay': [
('CommonDateTask', 0),
],
'event.tools.range.complete.count': [
('CommonDateTask', 0),
],
'event.tools.range.complete.fraction': [
('CommonDateTask', 1.),
],
}
)
def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events):
calls = []
class RangeDailyDerived(RangeDailyBase):
def missing_datetimes(*args):
calls.append(args)
return args[-1][:7]
task = RangeDailyDerived(of=CommonDateTask,
**kwargs)
self.assertEqual(list(map(str, task.requires())), expected_requires)
self.assertEqual(calls[0][1], CommonDateTask)
self.assertEqual((min(calls[0][2]), max(calls[0][2])), expected_finite_datetimes_range)
self.assertEqual(list(map(str, task.requires())), expected_requires)
self.assertEqual(len(calls), 1) # subsequent requires() should return the cached result, not call missing_datetimes again
self.assertEqual(self.events, expected_events)
self.assertFalse(task.complete())
def test_start_long_before_long_days_back_and_with_long_days_forward(self):
self._nonempty_subcase(
{
'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)),
'start': datetime.date(2011, 3, 20),
'stop': datetime.date(2025, 1, 29),
'task_limit': 4,
'days_back': 3 * 365,
'days_forward': 3 * 365,
},
(datetime.datetime(2014, 10, 24), datetime.datetime(2020, 10, 21)),
[
'CommonDateTask(d=2014-10-24)',
'CommonDateTask(d=2014-10-25)',
'CommonDateTask(d=2014-10-26)',
'CommonDateTask(d=2014-10-27)',
],
{
'event.tools.range.delay': [
('CommonDateTask', 3750),
],
'event.tools.range.complete.count': [
('CommonDateTask', 5057),
],
'event.tools.range.complete.fraction': [
('CommonDateTask', 5057. / (5057 + 7)),
],
}
)
class RangeHourlyBaseTest(unittest.TestCase):
maxDiff = None
def setUp(self):
# yucky to create separate callbacks; would be nicer if the callback
# received an instance of a subclass of Event, so one callback could
# accumulate all types
@RangeHourlyBase.event_handler(RangeEvent.DELAY)
def callback_delay(*args):
self.events.setdefault(RangeEvent.DELAY, []).append(args)
@RangeHourlyBase.event_handler(RangeEvent.COMPLETE_COUNT)
def callback_complete_count(*args):
self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args)
@RangeHourlyBase.event_handler(RangeEvent.COMPLETE_FRACTION)
def callback_complete_fraction(*args):
self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args)
self.events = {}
def test_consistent_formatting(self):
task = RangeHourlyBase(of=CommonDateHourTask,
start=datetime.datetime(2016, 1, 1))
self.assertEqual(task._format_range([datetime.datetime(2016, 1, 2, 13), datetime.datetime(2016, 2, 29, 23)]), '[2016-01-02T13, 2016-02-29T23]')
def _empty_subcase(self, kwargs, expected_events):
calls = []
class RangeHourlyDerived(RangeHourlyBase):
def missing_datetimes(*args):
calls.append(args)
return args[-1][:5]
task = RangeHourlyDerived(of=CommonDateHourTask,
**kwargs)
self.assertEqual(task.requires(), [])
self.assertEqual(calls, [])
self.assertEqual(task.requires(), [])
self.assertEqual(calls, []) # subsequent requires() should return the cached result, never call missing_datetimes
self.assertEqual(self.events, expected_events)
self.assertTrue(task.complete())
def test_start_after_hours_forward(self):
# nothing to do because start is later
self._empty_subcase(
{
'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)),
'start': datetime.datetime(2014, 3, 20, 17),
'hours_back': 4,
'hours_forward': 20,
},
{
'event.tools.range.delay': [
('CommonDateHourTask', 0),
],
'event.tools.range.complete.count': [
('CommonDateHourTask', 0),
],
'event.tools.range.complete.fraction': [
('CommonDateHourTask', 1.),
],
}
)
def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events):
calls = []
class RangeHourlyDerived(RangeHourlyBase):
def missing_datetimes(*args):
calls.append(args)
return args[-1][:7]
task = RangeHourlyDerived(of=CommonDateHourTask,
**kwargs)
self.assertEqual(list(map(str, task.requires())), expected_requires)
self.assertEqual(calls[0][1], CommonDateHourTask)
self.assertEqual((min(calls[0][2]), max(calls[0][2])), expected_finite_datetimes_range)
self.assertEqual(list(map(str, task.requires())), expected_requires)
self.assertEqual(len(calls), 1) # subsequent requires() should return the cached result, not call missing_datetimes again
self.assertEqual(self.events, expected_events)
self.assertFalse(task.complete())
def test_start_long_before_hours_back(self):
self._nonempty_subcase(
{
'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)),
'start': datetime.datetime(1960, 3, 2, 1),
'hours_back': 5,
'hours_forward': 20,
},
(datetime.datetime(1999, 12, 31, 23), datetime.datetime(2000, 1, 1, 23)),
[
'CommonDateHourTask(dh=1999-12-31T23)',
'CommonDateHourTask(dh=2000-01-01T00)',
'CommonDateHourTask(dh=2000-01-01T01)',
'CommonDateHourTask(dh=2000-01-01T02)',
'CommonDateHourTask(dh=2000-01-01T03)',
'CommonDateHourTask(dh=2000-01-01T04)',
'CommonDateHourTask(dh=2000-01-01T05)',
],
{
'event.tools.range.delay': [
('CommonDateHourTask', 25), # because of short hours_back we're oblivious to those 40 preceding years
],
'event.tools.range.complete.count': [
('CommonDateHourTask', 349192),
],
'event.tools.range.complete.fraction': [
('CommonDateHourTask', 349192. / (349192 + 7)),
],
}
)
def test_start_after_long_hours_back(self):
self._nonempty_subcase(
{
'now': datetime_to_epoch(datetime.datetime(2014, 10, 22, 12, 4, 29)),
'start': datetime.datetime(2014, 3, 20, 17),
'task_limit': 4,
'hours_back': 365 * 24,
},
(datetime.datetime(2014, 3, 20, 17), datetime.datetime(2014, 10, 22, 12)),
[
'CommonDateHourTask(dh=2014-03-20T17)',
'CommonDateHourTask(dh=2014-03-20T18)',
'CommonDateHourTask(dh=2014-03-20T19)',
'CommonDateHourTask(dh=2014-03-20T20)',
],
{
'event.tools.range.delay': [
('CommonDateHourTask', 5180),
],
'event.tools.range.complete.count': [
('CommonDateHourTask', 5173),
],
'event.tools.range.complete.fraction': [
('CommonDateHourTask', 5173. / (5173 + 7)),
],
}
)
def test_start_long_before_long_hours_back_and_with_long_hours_forward(self):
self._nonempty_subcase(
{
'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)),
'start': datetime.datetime(2011, 3, 20, 17),
'task_limit': 4,
'hours_back': 3 * 365 * 24,
'hours_forward': 3 * 365 * 24,
},
(datetime.datetime(2014, 10, 23, 13), datetime.datetime(2020, 10, 21, 12)),
[
'CommonDateHourTask(dh=2014-10-23T13)',
'CommonDateHourTask(dh=2014-10-23T14)',
'CommonDateHourTask(dh=2014-10-23T15)',
'CommonDateHourTask(dh=2014-10-23T16)',
],
{
'event.tools.range.delay': [
('CommonDateHourTask', 52560),
],
'event.tools.range.complete.count': [
('CommonDateHourTask', 84061),
],
'event.tools.range.complete.fraction': [
('CommonDateHourTask', 84061. / (84061 + 7)),
],
}
)
class FilesystemInferenceTest(unittest.TestCase):
def _test_filesystems_and_globs(self, datetime_to_task, datetime_to_re, expected):
actual = list(_get_filesystems_and_globs(datetime_to_task, datetime_to_re))
self.assertEqual(len(actual), len(expected))
for (actual_filesystem, actual_glob), (expected_filesystem, expected_glob) in zip(actual, expected):
self.assertTrue(isinstance(actual_filesystem, expected_filesystem))
self.assertEqual(actual_glob, expected_glob)
def test_date_glob_successfully_inferred(self):
self._test_filesystems_and_globs(
lambda d: CommonDateTask(d),
lambda d: d.strftime('(%Y).*(%m).*(%d)'),
[
(MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm01dara21'),
]
)
def test_datehour_glob_successfully_inferred(self):
self._test_filesystems_and_globs(
lambda d: CommonDateHourTask(d),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'),
[
(MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm[0-9][0-9]dara21'),
]
)
def test_wrapped_datehour_globs_successfully_inferred(self):
self._test_filesystems_and_globs(
lambda d: CommonWrapperTask(d),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'),
[
(MockFileSystem, 'TaskA/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]'),
(MockFileSystem, 'TaskB/no/worries[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]'),
]
)
def test_inconsistent_output_datehour_glob_not_inferred(self):
class InconsistentlyOutputtingDateHourTask(luigi.Task):
dh = luigi.DateHourParameter()
def output(self):
base = self.dh.strftime('/even/%Y%m%d%H')
if self.dh.hour % 2 == 0:
return MockTarget(base)
else:
return {
'spi': MockTarget(base + '/something.spi'),
'spl': MockTarget(base + '/something.spl'),
}
def test_raise_not_implemented():
list(_get_filesystems_and_globs(
lambda d: InconsistentlyOutputtingDateHourTask(d),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)')))
self.assertRaises(NotImplementedError, test_raise_not_implemented)
def test_wrapped_inconsistent_datehour_globs_not_inferred(self):
class InconsistentlyParameterizedWrapperTask(luigi.WrapperTask):
dh = luigi.DateHourParameter()
def requires(self):
yield TaskA(dh=self.dh - datetime.timedelta(days=1))
yield TaskB(dh=self.dh, complicator='no/worries')
def test_raise_not_implemented():
list(_get_filesystems_and_globs(
lambda d: InconsistentlyParameterizedWrapperTask(d),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)')))
self.assertRaises(NotImplementedError, test_raise_not_implemented)
class RangeDailyTest(unittest.TestCase):
def test_bulk_complete_correctly_interfaced(self):
class BulkCompleteDailyTask(luigi.Task):
d = luigi.DateParameter()
@classmethod
def bulk_complete(cls, parameter_tuples):
return list(parameter_tuples)[:-2]
def output(self):
raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete")
task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)),
of=BulkCompleteDailyTask,
start=datetime.date(2015, 11, 1),
stop=datetime.date(2015, 12, 1))
expected = [
'BulkCompleteDailyTask(d=2015-11-29)',
'BulkCompleteDailyTask(d=2015-11-30)',
]
actual = [t.task_id for t in task.requires()]
self.assertEqual(actual, expected)
@mock.patch('luigi.mock.MockFileSystem.listdir',
new=mock_listdir([
'/data/2014/p/v/z/2014_/_03-_-21octor/20/ZOOO',
'/data/2014/p/v/z/2014_/_03-_-23octor/20/ZOOO',
'/data/2014/p/v/z/2014_/_03-_-24octor/20/ZOOO',
]))
@mock.patch('luigi.mock.MockFileSystem.exists',
new=mock_exists_always_true)
def test_missing_tasks_correctly_required(self):
class SomeDailyTask(luigi.Task):
d = luigi.DateParameter()
def output(self):
return MockTarget(self.d.strftime('/data/2014/p/v/z/%Y_/_%m-_-%doctor/20/ZOOO'))
task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)),
of=SomeDailyTask,
start=datetime.date(2014, 3, 20),
task_limit=3,
days_back=3 * 365)
expected = [
'SomeDailyTask(d=2014-03-20)',
'SomeDailyTask(d=2014-03-22)',
'SomeDailyTask(d=2014-03-25)',
]
actual = [t.task_id for t in task.requires()]
self.assertEqual(actual, expected)
class RangeHourlyTest(unittest.TestCase):
# fishy to mock the mock, but MockFileSystem doesn't support globs yet
@mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents))
@mock.patch('luigi.mock.MockFileSystem.exists',
new=mock_exists_always_true)
def test_missing_tasks_correctly_required(self):
for task_path in task_a_paths:
MockTarget(task_path)
# this test takes a few seconds. Since stop is not defined,
# finite_datetimes constitute many years to consider
task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)),
of=TaskA,
start=datetime.datetime(2014, 3, 20, 17),
task_limit=3,
hours_back=3 * 365 * 24)
actual = [t.task_id for t in task.requires()]
self.assertEqual(actual, expected_a)
@mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents))
@mock.patch('luigi.mock.MockFileSystem.exists',
new=mock_exists_always_true)
def test_missing_wrapper_tasks_correctly_required(self):
task = RangeHourly(
now=datetime_to_epoch(datetime.datetime(2040, 4, 1)),
of=CommonWrapperTask,
start=datetime.datetime(2014, 3, 20, 23),
stop=datetime.datetime(2014, 3, 21, 6),
hours_back=30 * 365 * 24)
actual = [t.task_id for t in task.requires()]
self.assertEqual(actual, expected_wrapper)
def test_bulk_complete_correctly_interfaced(self):
class BulkCompleteHourlyTask(luigi.Task):
dh = luigi.DateHourParameter()
@classmethod
def bulk_complete(cls, parameter_tuples):
return parameter_tuples[:-2]
def output(self):
raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete")
task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)),
of=BulkCompleteHourlyTask,
start=datetime.datetime(2015, 11, 1),
stop=datetime.datetime(2015, 12, 1))
expected = [
'BulkCompleteHourlyTask(dh=2015-11-30T22)',
'BulkCompleteHourlyTask(dh=2015-11-30T23)',
]
actual = [t.task_id for t in task.requires()]
self.assertEqual(actual, expected)
@mock.patch('luigi.mock.MockFileSystem.exists',
new=mock_exists_always_false)
def test_missing_directory(self):
task = RangeHourly(now=datetime_to_epoch(
datetime.datetime(2014, 4, 1)),
of=TaskC,
start=datetime.datetime(2014, 3, 20, 23),
stop=datetime.datetime(2014, 3, 21, 1))
self.assertFalse(task.complete())
expected = [
'TaskC(dh=2014-03-20T23)',
'TaskC(dh=2014-03-21T00)']
self.assertEqual([t.task_id for t in task.requires()], expected)
class RangeInstantiationTest(LuigiTestCase):
def test_old_instantiation(self):
"""
Verify that you can still programmatically set the ``of`` param
"""
class MyTask(luigi.Task):
date_param = luigi.DateParameter()
def complete(self):
return False
range_task = RangeDailyBase(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)),
of=MyTask,
start=datetime.date(2015, 12, 1),
stop=datetime.date(2015, 12, 2))
expected_task = MyTask(date_param=datetime.date(2015, 12, 1))
self.assertEqual(expected_task, list(range_task._requires())[0])
def test_cli_instantiation(self):
"""
Verify that you can still use Range through CLI
"""
class MyTask(luigi.Task):
task_namespace = "wohoo"
date_param = luigi.DateParameter()
secret = 'some-value-to-soothe-python-linters'
comp = False
def complete(self):
return self.comp
def run(self):
self.comp = True
MyTask.secret = 'yay'
now = str(int(datetime_to_epoch(datetime.datetime(2015, 12, 2))))
self.run_locally_split('RangeDailyBase --of wohoo.MyTask --now {now} --start 2015-12-01 --stop 2015-12-02'.format(now=now))
self.assertEqual(MyTask(date_param=datetime.date(1934, 12, 1)).secret, 'yay')
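# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite): the pattern the
# bulk_complete tests above exercise, condensed.  A task exposing
# bulk_complete() lets RangeDaily resolve missing dates without calling
# complete() once per task instance.  The task name and output path are
# illustrative assumptions only; the helper is never called here.
def _example_range_daily_usage():
    class NightlyReport(luigi.Task):
        d = luigi.DateParameter()

        @classmethod
        def bulk_complete(cls, parameter_tuples):
            # report every date except the two most recent as done
            return list(parameter_tuples)[:-2]

        def output(self):
            return MockTarget(self.d.strftime('/reports/%Y-%m-%d'))

    range_task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)),
                            of=NightlyReport,
                            start=datetime.date(2015, 11, 1),
                            stop=datetime.date(2015, 12, 1))
    # only the two dates bulk_complete did not report come back as requirements
    return [t.task_id for t in range_task.requires()]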
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.radionode
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import glob
import os
import sys
import numpy as np
import matplotlib.pyplot as plt  # used by RadioNode.show()
if sys.version_info.major==2:
import ConfigParser as cp
else:
import configparser as cp
#import pylayers.util.easygui as eg
import pylayers.util.pyutil as pyu
import pylayers.util.geomutil as geo
from pylayers.mobility.trajectory import *
from pylayers.util.project import *
import scipy as sp
class RadioNode(PyLayers):
""" container for a Radio Node
This class manages the spatial and temporal behavior of a radio node
Attributes
----------
position
position of the RadioNode np.array([],dtype=float)
time
time tag of the RadioNode np.array([],dtype=float)
orientation
orientation 3x3xn (rotation matrix for each position)
points
dictionary of points (redundant information)
type
0: undefined 1: Tx 2 : Rx
Methods
-------
info : display information about a RadioNode
loadspa : load a spa file in PulsRay data format
save : save a RadioNode file in .spa, .ini, .vect data format
point : set a RadioNode point position
points : set a RadioNode set of points position
line : set a RadioNode route
surface : set a RadioNode area
volume : set a RadioNode volume
gpoint : set a RadioNode point position (gui)
gline : set a RadioNode route (gui)
gsurface : set a RadioNode area (gui)
gvolume : set a RadioNode volume (gui)
show3 : display the RadioNode in the associated structure
"""
def __init__(self, name = '',typ='undefined',
_fileini='radionode.ini',
_fileant='defant.vsh3'
):
"""
the _fileini file must be placed in the ini directory
Parameters
----------
typ : int
0 : undefined
1 : tx
2 : rx
_fileini : string
file of RadioNode coordinates
Notes
-----
The point [0,0,0] is defined as the first point (index 0)
"""
self.position = np.array([], dtype=float)
self.position = np.array([0, 0, 0]).reshape(3, 1)
self.time = np.array([], dtype=float)
self.orientation = np.eye(3).reshape(3, 3, 1)
self.typ = typ
self.N = 1
self.name = name
# keep the antenna file name so that info() can report it
self.fileant = _fileant
#
# clean existing .ini file
#
if _fileini == 'radionode.ini':
if typ == 'tx':
_fileini = _fileini.replace('node', 'tx')
if typ == 'rx':
_fileini = _fileini.replace('node', 'rx')
fileini = pyu.getlong(_fileini, 'ini')
# delete radionode.ini if it exists
try:
os.remove(fileini)
except:
pass
prefix = _fileini.replace('.ini','')
prefix = prefix.replace('.spa','')
self.fileini = prefix + '.ini'
self.filespa = prefix + '.spa'
self.filegeom = prefix + '.vect'
fileini = pyu.getlong(self.fileini,'ini')
# if file _fileini exists it is loaded
try:
fd = open(fileini,'r')
fd.close()
self.loadini(self.fileini, 'ini')
except:
pass
self.save()
def __repr__(self):
""" representation of radio node
Only position is shown
"""
st = ''
for k in range(self.N):
st = st + str(k)+ ' : ' \
+ str(round(self.position[0,k]*1000)/1000.) + ' ' + \
str(round(self.position[1,k]*1000)/1000.) + ' ' + \
str(round(self.position[2,k]*1000)/1000.) + '\n'
return(st)
def pos2pt(self):
""" convert position to points dict
"""
npt = np.shape(self.position)[1]
self.points = {}
for k in range(npt):
self.points[k + 1] = self.position[:, k]
def transform(self,alpha,trans):
""" tranform position rotation + translation
Parameters
----------
alpha : float
rotation angle in degrees (converted to radians internally)
trans : np.ndarray (2,)
translation applied to the x and y coordinates
"""
d2r = np.pi/180
Rot = np.array([[np.cos(d2r*alpha),-np.sin(d2r*alpha)],
[np.sin(d2r*alpha),np.cos(d2r*alpha)]])
self.position[0:2,:] = np.dot(Rot,self.position[0:2,:])
self.position[0:2,:] = self.position[0:2,:]+trans[:,np.newaxis]
def info(self):
""" display RadioNodes informations
"""
print("npos : ", self.N)
print("position : ", self.position)
print("name : ", self.name)
#print "orientation : ", self.orientation
print("type : ", self.typ)
print("fileini : ", self.fileini)
print("filespa : ", self.filespa)
print("filegeom : ", self.filegeom)
print("fileant : ", self.fileant)
def clear(self):
""" clear positions
The origin [0,0,0] is always defined as the first point
"""
self.position = np.array([], dtype=float)
self.position = np.array([0., 0., 0.]).reshape(3, 1)
self.N = 1
def points(self, pt=np.array([[0], [0], [0]])):
""" add a set of points to RadioNode
Parameters
----------
pt : ndarray
point position (3 x Npt)
"""
if type(pt) == list:
pt = np.array(pt)
self.position = pt
self.N = np.shape(self.position)[1]
self.save()
def point(self, pt=[0, 0, 0], time=[1], orientation=[], mode='subst'):
""" add a position to RadioNode
The new RadioNode is saved in .spa
Parameters
----------
pt : ndarray
point position (1 x 3)
time : ndarray
1x1
orientation : ndarray
3x3 matrix
mode: string
'subst' for replacement (default)
'append' for appending
Examples
--------
>>> from pylayers.simul.radionode import *
>>> import numpy as np
>>> tx = RadioNode()
>>> tx.point([1,1,1],[1],np.eye(3),'subst')
>>> tx.position
array([[1],
[1],
[1]])
"""
if isinstance(pt, list):
pt = np.array(pt)
if isinstance(time, list):
time = np.array(time)
orientation = np.reshape(np.eye(3), (3, 3, 1))
pt = np.array(pt)
time = np.array(time)
pt = np.reshape(pt, (3, 1))
if mode == 'subst':
self.time = time
self.position = pt
self.orientation = orientation
else:
try:
self.time = np.append(self.time, time, axis=0)
self.position = np.append(self.position, pt, axis=1)
self.orientation = np.append(self.orientation, orientation, axis=2)
except:
self.time = time
self.position = pt
self.orientation = orientation
self.pos2pt()
self.save()
def linevect(self,npt=1, step=1.0 , ptt=[0, 0, 0], vec=[1, 0, 0], mode='subst'):
""" create a line along a direction
Parameters
----------
npt : int
number of points
step : float
incremental distance in meters
ptt : list or array
1x3 point tail (starting point)
vec : list or array
1x3 unitary vector
mode : string
'subst'
'append'
Examples
--------
>>> from pylayers.simul.radionode import *
>>> r = RadioNode()
>>> r.linevect(npt=3)
>>> r
0 : 0.0 0.0 0.0
1 : 1.0 0.0 0.0
2 : 2.0 0.0 0.0
<BLANKLINE>
"""
if isinstance(ptt, list):
ptt = np.array(ptt)
if isinstance(vec, list):
vec = np.array(vec)
if (npt <= 1):
raise ValueError('npt should be greater than 1')
ptt = np.reshape(ptt, (3, 1))
vec = np.reshape(vec, (3, 1))
k = np.arange(npt)
pt = ptt + k*step*vec
if mode == 'subst':
self.position = pt
else:
self.position = np.append(self.position, pt, axis=1)
self.pos2pt()
self.N = np.shape(self.position)[1]
self.save()
def line(self, npt, ptt=[0, 0, 0], pth=[1, 0, 0], mode='subst'):
""" build a line trajectory for a RadioNode
Parameters
----------
npt : integer
number of points
ptt : list or ndarray
starting point coordinates (default [0,0,0])
pth : list or ndarray
ending point coordinates
mode : string
'subst' for replacement (default)
'append' for appending
Examples
--------
>>> from pylayers.simul.radionode import *
>>> r = RadioNode()
>>> r.line(3,[0,0,0],[1,0,0])
>>> r.position
array([[ 0. , 0.5, 1. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]])
"""
if isinstance(ptt, list):
ptt = np.array(ptt)
if isinstance(pth, list):
pth = np.array(pth)
if (npt <= 1):
raise ValueError('npt should be greater than 1')
ptt = np.reshape(ptt, (3, 1))
pth = np.reshape(pth, (3, 1))
pas = 1.0 / (npt - 1)
k = np.arange(0.0, 1.0 + pas, pas)
pt = ptt + k * (pth-ptt)
if mode == 'subst':
self.position = pt
else:
self.position = np.append(self.position, pt, axis=1)
self.pos2pt()
self.N = np.shape(self.position)[1]
self.save()
def surface(self, N1=2, N2=2, p0=[0, 0, 0], p1=[1, 0, 0], p2=[0, 1, 0], mode='subst'):
""" add a surface to RadioNode
add a surface with basis (p0p1,p0p2)
Parameters
----------
N1 : int
default 2
N2 : int
default 2
p0 : array or list
first point
p1 : array or list
second point
p2 : array or list
third point
mode : string
'subst'
'append'
Examples
--------
>>> from pylayers.simul.radionode import *
>>> tx= RadioNode()
>>> tx.surface(10,10,[0,0,1.5],[3.0,0,1.5],[0.0,3.0,1.5],'subst')
"""
p0 = np.array(p0)
p1 = np.array(p1)
p2 = np.array(p2)
p0 = np.reshape(p0, (3, 1))
p1 = np.reshape(p1, (3, 1))
p2 = np.reshape(p2, (3, 1))
pas1 = 1.0 / (N1 - 1)
k1 = np.arange(0.0, 1.0 + pas1, pas1)
pas2 = 1.0 / (N2 - 1)
k2 = np.arange(0.0, 1.0 + pas2, pas2)
n1 = len(k1)
n2 = len(k2)
kk1 = np.kron(np.ones(n2), k1)
kk2 = np.kron(k2, np.ones(n1))
pt = p0 + kk1 * (p1 - p0) + kk2 * (p2 - p0)
if mode == 'subst':
self.position = pt
else:
self.position = np.append(self.position, pt, axis=1)
self.pos2pt()
self.N = np.shape(self.position)[1]
self.save()
def volume(self,N1=2,N2=2,N3=2,p0=[0, 0, 0],p1=[1, 0, 0], p2=[0, 1, 0], p3=[0, 0, 1], mode='subst'):
""" add a volume to RadioNode
build a volume with edges : p0p1, p0p2, p0p3
Parameters
----------
N1 : int
number of points on axis 1
N2 : int
number of points on axis 2
N3 : int
number of points on axis 3
p0 : list or ndarray
first point
p1 : list or ndarray
second point
p2 : list or ndarray
third point
p3 : list or ndarray
fourth point
Examples
--------
>>> from pylayers.simul.radionode import *
>>> tx = RadioNode()
>>> tx.volume(10,10,10,[0,0,1.0],[3.0,0,1.1],[0.0,3.0,1.1],[0.0,0.0,2.0])
"""
if isinstance(p0, list):
p0 = np.array(p0)
if isinstance(p1, list):
p1 = np.array(p1)
if isinstance(p2, list):
p2 = np.array(p2)
if isinstance(p3, list):
p3 = np.array(p3)
p0 = np.reshape(p0, (3, 1))
p1 = np.reshape(p1, (3, 1))
p2 = np.reshape(p2, (3, 1))
p3 = np.reshape(p3, (3, 1))
pas1 = 1.0 / (N1 - 1)
k1 = np.arange(0.0, 1.0 + pas1, pas1)
pas2 = 1.0 / (N2 - 1)
k2 = np.arange(0.0, 1.0 + pas2, pas2)
pas3 = 1.0 / (N3 - 1)
k3 = np.arange(0.0, 1.0 + pas3, pas3)
n1 = len(k1)
n2 = len(k2)
n3 = len(k3)
kk1 = np.kron(np.ones(n2 * n3), k1)
kk2 = np.kron(np.kron(np.ones(n1), k2), np.ones(n3))
kk3 = np.kron(k3, np.ones(n1 * n2))
pt = p0 + kk1 * (p1 - p0) + kk2 * (p2 - p0) + kk3 * (p3 - p0)
if mode == 'subst':
self.position = pt
else:
self.position = np.append(self.position, pt, axis=1)
self.pos2pt()
self.N = np.shape(self.position)[1]
self.save()
def loadini(self, _filespa, rep='ini'):
""" load an .ini file
Parameters
----------
_filespa : string
short filename
rep : string
directory name
"""
filespa = pyu.getlong(_filespa, rep)
#print filespa+ " loadini"
space = cp.ConfigParser()
space.read(filespa)
points = space.items("coordinates")
self.points = pyu.lt2idic(points)
self.N = len(self.points.keys())
del self.position
for k in self.points.keys():
try:
self.position = np.hstack((self.position,
self.points[k].reshape(3,1)))
except:
self.position = self.points[k].reshape(3,1)
self.traj=Trajectory(pt=self.position.T)
def loadspa(self, _filespa, rep=pstruc['DIRLCH']):
""" load a spa file
Parameters
----------
_filespa : string
short filename
rep : string
directory name
"""
self.filespa = _filespa
filespa = pyu.getlong(_filespa, rep)
try:
fid = open(filespa)
except:
print("filespa does not exist")
return()
lig = fid.readlines()
typ = int(lig[0])
if typ == 0:
nnpt = int(lig[1])
coord = lig[2:]
for index in range(len(coord)):
point = list(map(float, coord[index].split()))
ndpoint = np.array([[point[0]], [point[1]], [point[2]]])
self.position = np.append(self.position, ndpoint, axis=1)
self.time = np.arange(nnpt)
ident = np.eye(3)
tmp = np.zeros(9 * nnpt)
self.orientation = np.reshape(tmp, (3, 3, nnpt))
self.N = nnpt
for i in range(nnpt):
self.orientation[:, :, i] = ident
fid.close()
def save(self):
""" save RadioNode in .ini, .spa, .vect file
This function saves the RadioNode in different file formats
.spa : pulsray format
.vect : geomview format
"""
_filespa = self.filespa
_fileini = self.fileini
fileini = pyu.getlong(_fileini, 'ini')
try:
fd = open(fileini, "w")
except:
print(fileini + ' does not exist')
space = cp.ConfigParser()
space.add_section("coordinates")
npt = np.shape(self.position)[1]
for k in range(npt):
x = self.position[0, k]
y = self.position[1, k]
z = self.position[2, k]
space.set("coordinates", str(k + 1), str(x) + ' ' +
str(y) + ' ' + str(z))
space.write(fd)
fd.close()
points = space.items("coordinates")
if self.typ == 'undefined':
filespa = pyu.getlong(_filespa, 'ini')
colorname = 'green'
elif self.typ == 'tx':
filespa = pyu.getlong(_filespa, pstruc['DIRLCH'])
colorname = 'red'
elif self.typ == 'rx':
filespa = pyu.getlong(_filespa, pstruc['DIRTRA'])
colorname = 'blue'
# save points in GeomVect container
filename = self.filegeom.replace('.vect', '')
filename = filename.replace('.off', '')
try:
gv = geo.Geomoff(filename)
ant = self.A
# if not hasattr(ant,'theta'):
if ant.fromfile:
ant.Fsynth3(pattern=True)
else:
ant.Fpatt(pattern=True)
V = ant.SqG[ant.Nf // 2, :, :]
if not hasattr(self,'position'):
print("no position available")
T=self.orientation.reshape(3,3)
gv.pattern(ant.theta,ant.phi,V,po=self.position,T=T,ilog=False,minr=0.01,maxr=1.)
self.filegeom=filename + '.off'
except:
#pdb.set_trace()
if hasattr(self,'position'):
pass
# problem gv is not a geomvect
# gv.points(self.position, colorname)
else :
print(" no position available ")
if _filespa.split('.')[1] == 'spa':
fi_spa = open(filespa, 'w')
npt = np.shape(self.position)[1]
snpt = str(npt) + "\n"
snpt2 = str(npt) + " " + str(npt) + " " + str(npt) + "\n"
fi_spa.write("0\n")
fi_spa.write(snpt)
for i in range(npt):
x = str(self.position[0, i]).replace(',', '.')
y = str(self.position[1, i]).replace(',', '.')
z = str(self.position[2, i]).replace(',', '.')
chaine = x + " " + y + " " + z + "\n"
chaine2 = chaine.replace(',', '.')
fi_spa.write(chaine)
fi_spa.close()
def gpoint(self, mode='subst', display=False):
""" gui point
Parameters
----------
mode : string
subst
display : boolean
False
"""
p0 = self.position[:, 0]
(p0, n1) = eg.pointbox(p0, 1)
self.point(p0, [1], np.eye(3), mode)
self.save()
if display:
self.show3()
def gline(self, mode='subst', display=False):
""" gui line
A line is built between the first point and the gui point
Parameters
----------
mode : string
subst
display : boolean
False
"""
p0 = self.position[:, 0]
(p1, N1) = eg.pointbox(p0, 10)
self.line(N1, p0, p1, mode)
self.save()
if display:
self.show3()
def gsurface(self, mode='subst', display=False):
""" gui surface
Parameters
----------
mode : string
subst
display : boolean
False
"""
p0 = self.position[:, 0]
(p1, N1) = eg.pointbox(p0, 10, 'Enter Surface second point')
(p2, N2) = eg.pointbox(p1, 10, 'Enter Surface third point')
self.surface(N1, N2, p0, p1, p2, mode)
self.save()
if display:
self.show3()
def gvolume(self, mode='subst', display=False):
""" gui volume
Parameters
----------
mode : string
subst
display : boolean
False
"""
p0 = self.position[:, 0]
(p1, N1) = eg.pointbox(p0, 10, 'Enter Volume second point')
(p2, N2) = eg.pointbox(p1, 10, 'Enter Volume third point')
(p3, N3) = eg.pointbox(p2, 10, 'Enter Volume fourth point')
self.volume(N1, N2, N3, p0, p1, p2, p3, mode)
self.save()
if display:
self.show3()
# def savevect(self):
# """
# Create a .vect file
# The format type is 0: explicit coordinates of all points.
#
# save(_filespa)
#
# _filespa : file short name
#
# """
#
# if self.typ==0:
# self.filegeom="RadioNode.vect"
# filegeom = pyu.getlong("RadioNode.vect","geom")
# elif self.typ==1:
# self.filegeom = "RadioTx.vect"
# filegeom = pyu.getlong("RadioTx.vect","geom")
# elif self.typ==2:
# self.filegeom = "RadioRx.vect"
# filegeom = pyu.getlong("RadioRx.vect","geom")
#
# fi_geom = open(filegeom,'w')
#
# npt = shape(self.position)[1]
# snpt2 = str(npt)+" "+str(npt)+" "+str(npt)+"\n"
# if npt>1:
# fi_geom.write("appearance{\n")
# fi_geom.write("linewidth 8}\n")
# fi_geom.write("VECT\n")
# fi_geom.write(snpt2)
# fi_geom.write("1 "*npt+"\n")
# fi_geom.write("1 "*npt+"\n")
# else:
# fi_geom.write("ESPHERE\n")
# fi_geom.write("0.2\n")
#
# for i in range(npt):
# x = str(self.position[0,i])
# y = str(self.position[1,i])
# z = str(self.position[2,i])
# chaine = x+" "+y+" "+z+"\n"
# fi_geom.write(chaine)
# if npt>1:
# if self.typ==0:
# fi_geom.write(npt*"0 0 1 1\n")
# elif self.typ==1:
# fi_geom.write(npt*"1 0 0 1\n")
# elif self.typ==2:
# fi_geom.write(npt*"0 1 0 1\n")
# fi_geom.close()
# def savespa(self):
# """
# Create a .spa file
# The format type is 0: explicit coordinates of all points.
#
# savespa(_filespa)
#
# _filespa : file short name
#
# """
#
# _filespa = self.filespa
#
# elif self.typ==1:
# filespa = pyu.getlong(_filespa,'launch')
# elif self.typ==2:
# filespa = pyu.getlong(_filespa,'trace')
#
# fi_spa = open(filespa,'w')
#
# npt = shape(self.position)[1]
# snpt = str(npt)+"\n"
#
# fi_spa.write("0\n")
# fi_spa.write(snpt)
# for i in range(npt):
# x = str(self.position[0,i])
# y = str(self.position[1,i])
# z = str(self.position[2,i])
# chaine = x+" "+y+" "+z+"\n"
# fi_spa.write(chaine)
# fi_spa.close()
def show(self, num = [], fig=[], ax =[],size=5,marker='o',color='b'):
""" Display RadioNode position in the 2D strucure
Parameters
----------
"""
if num ==[]:
num = np.arange(np.shape(self.position)[1])
x = self.position[0, num]
y = self.position[1, num]
if ax == []:
fig = plt.gcf()
ax = fig.gca()
ax.scatter(x, y,s=size,c=color,linewidth=0)
return fig,ax
# def show3(self, _filestr='DLR.off'):
# """ display RadioNode position in geomview
#
# Parameters
# ----------
#
# _filestr : string
# structure file
# """
# filename = pyu.getlong("strucRN.off", pstruc['DIRGEOM'])
# fo = open(filename, "w")
# filegeom = pyu.getlong(self.filegeom, pstruc['DIRGEOM'])
#
# # get .off filename from .str or .str2 filename
# _fileoff, ext = os.path.splitext(self.filestr)
# _fileoff = _fileoff + '.off'
# fo.write("LIST\n")
# if os.path.isfile(pyu.getlong(_fileoff,'geom')):
# fo.write("{<" + _fileoff + "}\n")
# fo.write("{<" + self.filegeom + "}\n")
# fo.write("{</usr/share/geomview/geom/xyz.vect}\n")
# fo.close()
# chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
# os.system(chaine)
def move(self, dx, dy, dz):
""" move RadioNode with a specified offset over each cartesian axis
Parameters
----------
dx : float
dy : float
dz : float
"""
self.position[0, :] += dx
self.position[1, :] += dy
self.position[2, :] += dz
self.save()
def extract(self, i):
""" extract the i-th radionode component
Parameters
----------
i : integer
Returns
-------
u : RadioNode
"""
if self.typ == 'undefined':
u = RadioNode(self.filestr)
elif self.typ == 'tx':
u = RadioTx(self.filestr, self.signal)
elif self.typ == 'rx':
u = RadioRx(self.filestr, self.fc, self.bandwidth, self.NF)
u.position = self.position[:, i]
#
#
# u.time = self.time[i]
# u.orientation = self.orientation[:,:,i]
u.filespa = "filespa.spa"
#
# Write the RadioNode Coordinate in filespa
#
if self.typ != 'undefined':
if self.typ == 'tx':
filespa = pyu.getlong("filespa.spa", pstruc['DIRLCH'])
elif self.typ == 'rx':
filespa = pyu.getlong("filespa.spa", pstruc['DIRTRA'])
fi = open(filespa, 'w')
fi.write("0\n")
fi.write("1\n")
x = str(self.position[0, i])
y = str(self.position[1, i])
z = str(self.position[2, i])
chaine = x + " " + y + " " + z + "\n"
fi.write(chaine)
fi.close()
return u
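# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the original class): typical ways of
# populating a RadioNode with positions, using only methods defined
# above.  It is never called at import time, and it assumes a working
# pylayers project environment because every setter ends with save().
def _demo_radionode():
    tx = RadioNode(name='demo', typ='tx')
    # a single transmitter position at 1.5 m height
    tx.point([1.0, 1.0, 1.5], mode='subst')
    # a 5-point route between two points
    tx.line(5, ptt=[0, 0, 1.5], pth=[4, 0, 1.5], mode='subst')
    # a 10 x 10 grid of receiver positions at constant height
    rx = RadioNode(name='demo', typ='rx')
    rx.surface(10, 10, [0, 0, 1.5], [3.0, 0, 1.5], [0.0, 3.0, 1.5], mode='subst')
    return tx, rx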
if (__name__ == "__main__"):
tx = RadioNode(_fileini='w2m1rx.ini')
doctest.testmod()
|
|
# coding=utf-8
"""
hangman.view
~~~~~~~~~~~~
View layer, printing and prompting.
"""
from __future__ import absolute_import
import click
from ._compat import zip
from .utils import FlashMessage, GameOverNotificationComplete
# DRAW COMPONENT BLOCK
# -------------------------------------------------------------------
def draw_board(game, message=FlashMessage()):
"""
Present the game status with pictures.
- Clears the screen.
- Flashes any messages.
- Zip the two halves of the picture together.
.. code-block:: text
+---------------------------------------------+
| message 45 x 1 |
+---------------------------------------------+
| title 45 x 1 |
+----------+----------------------------------+
| | |
| | |
| | |
| | |
| picture | misses |
| 10 x 10 | 35 x 10 |
| | |
| | |
| | |
| | |
+----------+----------------------------------+
| hits 45 x 1 |
+---------------------------------------------+
Dare to pick a letter:
_
**Example output:**
.. code-block:: text
HANGMAN GAME
_____
| |
|
| MISSES:
| _ _ _ _ _ _ _ _ _ _
|
|
________|_
_ _ _ _ _ _ _
Dare to pick a letter:
_
:param hangman.Hangman game: game instance
:param hangman.utils.FlashMessage message: flash message
:raises: hangman.utils.GameOverNotificationComplete
"""
# setup
click.clear()
partial_picture = build_partial_picture(game.remaining_turns)
partial_misses = build_partial_misses(game.misses)
# print
print_partial_message(message, game.answer)
print_partial_title()
print_partial_body(partial_picture, partial_misses)
print_partial_hits(game.status)
# raise to break game loop
if message.game_lost or message.game_won:
raise GameOverNotificationComplete
def say_goodbye():
"""Write a goodbye message."""
click.secho('Have a nice day!', bold=True, fg='green', blink=True)
return print_spacer()
# PROMPT USER INPUT
# -------------------------------------------------------------------
def prompt_guess():
"""Get a single letter."""
print_spacer()
click.secho('Dare to pick a letter: ', dim=True, bold=True)
letter = click.getchar()
# \x03 = ctrl+c, \x04 = ctrl+d
if letter in ['\x03', '\x04']:
raise KeyboardInterrupt
return letter
def prompt_play_again():
"""Prompt user to play again."""
print_spacer()
return click.confirm('Double or nothing?')
# BUILD PARTIAL BLOCKS
# -------------------------------------------------------------------
def build_partial_picture(remaining_turns):
"""Generator, build the iconic hangman game status."""
yield ' _____'
yield ' | |'
if remaining_turns <= 9:
yield ' (_) |'
else:
yield ' |'
if remaining_turns <= 5:
yield ' \|/ |'
elif remaining_turns <= 6:
yield ' \| |'
elif remaining_turns <= 8:
yield ' | |'
else:
yield ' |'
if remaining_turns <= 7:
yield ' | |'
else:
yield ' |'
if remaining_turns <= 4:
yield ' | |'
else:
yield ' |'
if remaining_turns <= 0:
yield ' _/ \_ |'
elif remaining_turns <= 1:
yield ' _/ \ |'
elif remaining_turns <= 2:
yield ' / \ |'
elif remaining_turns <= 3:
yield ' / |'
else:
yield ' |'
yield '________|_'
def build_partial_misses(game_misses):
"""Generator, build game misses block."""
misses_block = ' '.join('{0:_<10s}'.format(''.join(game_misses)))
yield ''
yield ''
yield ''
yield '{0:s}{1:s}'.format(' ' * 5, 'MISSES:')
yield '{0:s}{1:s}'.format(' ' * 5, misses_block)
yield ''
yield ''
yield ''
yield ''
yield ''
# PRINT PARTIAL BLOCKS
# -------------------------------------------------------------------
def print_partial_message(flash, answer):
if flash.game_lost:
message = "YOU LOSE! THE ANSWER IS {0}".format(answer)
return click.secho('{0:45s}'.format(message), bold=True, fg='red')
if flash.game_won:
message = "YOU ARE SO COOL"
return click.secho('{0:45s}'.format(message), bold=True, fg='cyan')
if flash.message:
return click.secho('{0:45s}'.format(flash), bold=True, fg='yellow')
return print_spacer()
def print_partial_title():
return click.secho('{0: ^45s}'.format('HANGMAN GAME'), bold=True, underline=True)
def print_partial_body(picture, status):
for line in zip(picture, status):
click.echo('{0:10s}{1:35s}'.format(*line))
def print_partial_hits(game_status):
# Dynamically space hits to fill line
space_between_letters = '   ' if len(game_status) < 45 / 4 else ' '
formatted_game_status = space_between_letters.join(game_status)
print_spacer()
return click.echo('{0: ^45s}'.format(formatted_game_status))
def print_spacer():
"""Print empty line"""
return click.echo()
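# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): draw_board()
# only reads game.remaining_turns, game.misses, game.status and
# game.answer, so a minimal stand-in object is enough to render one
# board here; the real game object normally comes from hangman.Hangman,
# whose exact API is an assumption.
if __name__ == '__main__':
    class _FakeGame(object):
        remaining_turns = 7       # drives which picture lines appear
        misses = ['X', 'Q']       # wrong guesses shown in the MISSES block
        status = '_A__MA_'        # partially revealed answer
        answer = 'HANGMAN'        # only revealed on a game-over flash

    draw_board(_FakeGame())       # clear screen, flash, picture, misses, hits
    say_goodbye()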
|
|
# -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from django.db import connection
from django.db.models import F
from django.test import TestCase
from django_mysql.models.handler import Handler
from django_mysql.utils import index_name
from testapp.models import (
Author, AuthorHugeName, AuthorMultiIndex, NameAuthor, VanillaAuthor
)
def get_index_names(model):
# There's no easy way of getting index names in django so pull them from
# INFORMATION_SCHEMA
with connection.cursor() as cursor:
cursor.execute(
"""SELECT DISTINCT INDEX_NAME
FROM INFORMATION_SCHEMA.STATISTICS
WHERE TABLE_SCHEMA = %s AND
TABLE_NAME = %s""",
(connection.settings_dict['NAME'], model._meta.db_table)
)
index_names = [x[0] for x in cursor.fetchall()]
return index_names
class HandlerCreationTests(TestCase):
def test_bad_creation_joins_not_allowed(self):
qs = Author.objects.filter(tutor__name='A')
with pytest.raises(ValueError):
qs.handler()
def test_bad_creation_limit_not_allowed(self):
qs = Author.objects.all()[:100]
with pytest.raises(ValueError):
qs.handler()
def test_bad_creation_ordering_not_allowed(self):
qs = Author.objects.order_by('name')
with pytest.raises(ValueError):
qs.handler()
def test_can_open_close_with_huge_table_name(self):
with AuthorHugeName.objects.handler():
pass
def test_cannot_open_twice(self):
handler = Author.objects.handler()
with handler:
with pytest.raises(ValueError):
with handler:
pass
def test_cannot_close_unopened(self):
handler = Author.objects.handler()
with pytest.raises(ValueError):
handler.__exit__(None, None, None)
class BaseAuthorTestCase(TestCase):
def setUp(self):
self.jk = Author.objects.create(name='JK Rowling')
self.grisham = Author.objects.create(name='John Grisham')
class HandlerReadTests(BaseAuthorTestCase):
def test_bad_read_unopened(self):
handler = Author.objects.all().handler()
with pytest.raises(RuntimeError):
handler.read()
def test_bad_read_mode(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(mode='non-existent')
def test_read_does_single_by_default(self):
with Author.objects.handler() as handler:
handler_all = list(handler.read())
assert handler_all == [self.jk]
def test_read_limit_first(self):
with Author.objects.handler() as handler:
handler_first = handler.read(limit=1)[0]
qs_first = Author.objects.earliest('id')
assert handler_first == qs_first
def test_read_limit_last(self):
with Author.objects.handler() as handler:
handler_last = handler.read(mode='last', limit=1)[0]
qs_last = Author.objects.latest('id')
assert handler_last == qs_last
def test_read_limit_all(self):
with Author.objects.handler() as handler:
handler_all = list(handler.read(limit=2))
qs_all = list(Author.objects.all())
assert handler_all == qs_all
def test_read_index_primary(self):
with Author.objects.handler() as handler:
handler_all = list(handler.read(index='PRIMARY', limit=2))
qs_all = list(Author.objects.order_by('id'))
assert handler_all == qs_all
def test_read_index_different(self):
index_name = [name for name in get_index_names(Author)
if name != "PRIMARY"][0]
with Author.objects.handler() as handler:
handler_all = list(handler.read(index=index_name, limit=2))
qs_all = list(Author.objects.order_by('name').all())
assert handler_all == qs_all
def test_read_where_filter_read(self):
qs = Author.objects.filter(name__startswith='John')
with qs.handler() as handler:
handler_all = list(handler.read())
qs_all = list(qs)
assert handler_all == qs_all
def test_read_where_filter_f_expression(self):
qs = Author.objects.filter(name=F('name'))
with qs.handler() as handler:
handler_all = list(handler.read(limit=100))
assert len(handler_all) == 2
def test_read_where_exclude(self):
qs = Author.objects.filter(name__contains='JK')
with qs.handler() as handler:
handler_all = list(handler.read())
qs_all = list(qs)
assert handler_all == qs_all
def test_read_where_filter_params_not_injected_or_modified(self):
table_col = "`looks_like`.`table_column`"
author = Author.objects.create(name=table_col)
qs = Author.objects.filter(name=table_col)
with qs.handler() as handler:
handler_first = handler.read()[0]
assert handler_first == author
def test_read_where_passed_in(self):
qs = Author.objects.filter(name__startswith='John')
with Author.objects.handler() as handler:
handler_author = handler.read(where=qs)[0]
assert handler_author == qs[0]
def test_read_where_passed_in_overrides_completely(self):
qs = Author.objects.filter(name='JK Rowling')
qs2 = Author.objects.filter(name='John Grisham')
with qs.handler() as handler:
handler_default = handler.read()[0]
handler_where = handler.read(where=qs2)[0]
assert handler_default == qs[0]
assert handler_where == qs2[0]
def test_read_bad_where_passed_in(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(where=Author.objects.filter(tutor__name='A'))
def test_read_index_value_and_mode_invalid(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(value=1, mode='first')
def test_read_index_equal(self):
with Author.objects.handler() as handler:
handler_result = handler.read(value=self.jk.id)[0]
assert handler_result == self.jk
def test_read_index_equal_exact(self):
with Author.objects.handler() as handler:
handler_result = handler.read(value__exact=self.jk.id)[0]
assert handler_result == self.jk
def test_read_index_less_than(self):
with Author.objects.handler() as handler:
handler_result = handler.read(value__lt=self.jk.id + 1)[0]
assert handler_result == self.jk
def test_read_index_less_than_equal(self):
with Author.objects.handler() as handler:
handler_result = handler.read(value__lte=self.jk.id)[0]
assert handler_result == self.jk
def test_read_index_greater_than_equal(self):
with Author.objects.handler() as handler:
handler_result = handler.read(value__gte=self.jk.id)[0]
assert handler_result == self.jk
def test_read_index_greater_than(self):
with Author.objects.handler() as handler:
handler_result = handler.read(value__gt=self.jk.id)[0]
assert handler_result == self.grisham
def test_read_index_too_many_filters(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(value__lte=1, value__gte=1)
def test_read_index_bad_operator(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(value__flange=1)
def test_read_bad_argument(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(pk=1)
def test_read_bad_argument_underscores(self):
with Author.objects.handler() as handler:
with pytest.raises(ValueError):
handler.read(value_exact=1)
class HandlerIterTests(BaseAuthorTestCase):
def test_iter_all(self):
with Author.objects.handler() as handler:
seen_ids = [author.id for author in handler.iter()]
assert seen_ids == [self.jk.id, self.grisham.id]
def test_iter_chunk_size_1(self):
with Author.objects.handler() as handler:
seen_ids = [author.id for author in handler.iter(chunk_size=1)]
assert seen_ids == [self.jk.id, self.grisham.id]
def test_iter_reverse(self):
with Author.objects.handler() as handler:
seen_ids = [author.id for author in handler.iter(reverse=True)]
assert seen_ids == [self.grisham.id, self.jk.id]
def test_iter_reverse_chunk_size_1(self):
with Author.objects.handler() as handler:
seen_ids = [author.id for author in
handler.iter(chunk_size=1, reverse=True)]
assert seen_ids == [self.grisham.id, self.jk.id]
def test_bad_iter_unopened(self):
handler = Author.objects.all().handler()
with pytest.raises(RuntimeError):
sum(1 for x in handler.iter())
def test_iter_where_preset(self):
where_qs = Author.objects.filter(name__startswith='John')
with where_qs.handler() as handler:
seen_ids = [author.id for author in handler.iter()]
assert seen_ids == [self.grisham.id]
def test_iter_where_passed_in(self):
where_qs = Author.objects.filter(name__startswith='John')
with Author.objects.handler() as handler:
seen_ids = [author.id for author in handler.iter(where=where_qs)]
assert seen_ids == [self.grisham.id]
def test_iter_loses_its_place(self):
portis = Author.objects.create(name='Charles Portis')
with Author.objects.handler() as handler:
iterator = handler.iter(chunk_size=1)
assert next(iterator) == self.jk
assert next(iterator) == self.grisham
iterator2 = handler.iter(chunk_size=1, reverse=True)
assert next(iterator2) == portis
# This SHOULD be portis, but it thinks it's exhausted already
# because iterator2 moved the handler's position in reverse, so the
# next fetch asks for the page after the last row and gets nothing
with pytest.raises(StopIteration):
assert next(iterator) == portis
class HandlerMultipartIndexTests(TestCase):
def setUp(self):
super(HandlerMultipartIndexTests, self).setUp()
self.smith1 = AuthorMultiIndex.objects.create(name='John Smith',
country='Scotland')
self.smith2 = AuthorMultiIndex.objects.create(name='John Smith',
country='England')
self.index_name = index_name(AuthorMultiIndex, 'name', 'country')
def test_read_all(self):
with AuthorMultiIndex.objects.handler() as handler:
handler_all = list(handler.read(index=self.index_name, limit=2))
qs_all = list(AuthorMultiIndex.objects.order_by('name', 'country'))
assert handler_all == qs_all
def test_read_index_multipart(self):
with AuthorMultiIndex.objects.handler() as handler:
value = ('John Smith', 'England')
result = handler.read(index=self.index_name, value=value)[0]
assert result == self.smith2
class HandlerNestingTests(BaseAuthorTestCase):
def setUp(self):
super(HandlerNestingTests, self).setUp()
self.jk_name = NameAuthor.objects.create(name='JK Rowling')
self.grisham_name = NameAuthor.objects.create(name='John Grisham')
def test_can_nest(self):
ahandler = Author.objects.handler()
bhandler = NameAuthor.objects.handler()
with ahandler, bhandler:
handler_plain = ahandler.read()[0]
handler_name = bhandler.read()[0]
assert handler_plain == self.jk
assert handler_name == self.jk_name
def test_can_nest_two_for_same_table(self):
ahandler = Author.objects.handler()
bhandler = Author.objects.handler()
with ahandler, bhandler:
first = ahandler.read()[0]
second = bhandler.read()[0]
assert first == self.jk
assert second == self.jk
class HandlerStandaloneTests(TestCase):
def setUp(self):
super(HandlerStandaloneTests, self).setUp()
self.jk = VanillaAuthor.objects.create(name='JK Rowling')
self.grisham = VanillaAuthor.objects.create(name='John Grisham')
def test_vanilla_works(self):
handler = Handler(VanillaAuthor.objects.all())
with handler:
first = handler.read()[0]
assert first == self.jk
def test_vanilla_filters(self):
qs = VanillaAuthor.objects.filter(name__startswith='John')
with Handler(qs) as handler:
first = handler.read()[0]
assert first == self.grisham
class HandlerMultiDBTests(TestCase):
def setUp(self):
super(HandlerMultiDBTests, self).setUp()
self.jk = Author.objects.using('other').create(name='JK Rowling')
self.grisham = Author.objects.create(name='John Grisham')
def test_queryset_db_is_used(self):
handler = Author.objects.using('other').handler()
with handler:
handler_result = handler.read()[0]
assert handler_result == self.jk
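# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite): the Handler calls
# exercised above, condensed into one place.  It assumes the same
# testapp Author model and an open database connection, and it is not
# collected as a test.
def _example_handler_usage():
    results = {}
    with Author.objects.handler() as handler:
        # single row, first row in index order
        results['first'] = handler.read()[0]
        # several rows at once
        results['first_two'] = list(handler.read(limit=2))
        # read through a named index, or compare against an index value
        results['by_pk'] = list(handler.read(index='PRIMARY', limit=2))
        results['lt'] = handler.read(value__lt=10)[0]
        # reuse a queryset's WHERE clause for the read
        johns = Author.objects.filter(name__startswith='John')
        results['johns'] = list(handler.read(where=johns))
        # iterate the whole table in chunks
        results['all_ids'] = [a.id for a in handler.iter(chunk_size=100)]
    return results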
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
## Originally zExceptions.ExceptionFormatter from Zope;
## Modified by Ian Bicking, Imaginary Landscape, 2005
"""
An exception collector that finds traceback information plus
supplements
"""
import sys
import traceback
import time
from six.moves import cStringIO as StringIO
import linecache
from paste.exceptions import serial_number_generator
import warnings
DEBUG_EXCEPTION_FORMATTER = True
DEBUG_IDENT_PREFIX = 'E-'
FALLBACK_ENCODING = 'UTF-8'
__all__ = ['collect_exception', 'ExceptionCollector']
class ExceptionCollector(object):
"""
Produces a data structure that can be used by formatters to
display exception reports.
Magic variables:
If you define one of these variables in your local scope, you can
add information to tracebacks that happen in that context. This
allows applications to add all sorts of extra information about
the context of the error, including URLs, environmental variables,
users, hostnames, etc. These are the variables we look for:
``__traceback_supplement__``:
You can define this locally or globally (unlike all the other
variables, which must be defined locally).
``__traceback_supplement__`` is a tuple of ``(factory, arg1,
arg2...)``. When there is an exception, ``factory(arg1, arg2,
...)`` is called, and the resulting object is inspected for
supplemental information.
``__traceback_info__``:
This information is added to the traceback, usually fairly
literally.
``__traceback_hide__``:
If set and true, this indicates that the frame should be
hidden from abbreviated tracebacks. This way you can hide
some of the complexity of the larger framework and let the
user focus on their own errors.
By setting it to ``'before'``, all frames before this one will
be thrown away. By setting it to ``'after'`` then all frames
after this will be thrown away until ``'reset'`` is found. In
each case the frame where it is set is included, unless you
append ``'_and_this'`` to the value (e.g.,
``'before_and_this'``).
Note that formatters will ignore this entirely if the frame
that contains the error wouldn't normally be shown according
to these rules.
``__traceback_reporter__``:
This should be a reporter object (see the reporter module),
or a list/tuple of reporter objects. All reporters found this
way will be given the exception, innermost first.
``__traceback_decorator__``:
This object (defined in a local or global scope) will get the
result of this function (the CollectedException defined
below). It may modify this object in place, or return an
entirely new object. This gives the object the ability to
manipulate the traceback arbitrarily.
The actual interpretation of these values is largely up to the
reporters and formatters.
``collect_exception(*sys.exc_info())`` will return an object with
several attributes:
``frames``:
A list of frames
``exception_formatted``:
The formatted exception, generally a full traceback
``exception_type``:
The type of the exception, like ``ValueError``
``exception_value``:
The string value of the exception, like ``'x not in list'``
``identification_code``:
A hash of the exception data meant to identify the general
exception, so that it shares this code with other exceptions
that derive from the same problem. The code is a hash of
all the module names and function names in the traceback,
plus exception_type. This should be shown to users so they
can refer to the exception later. (@@: should it include a
portion that allows identification of the specific instance
of the exception as well?)
The list of frames goes innermost last. Each frame has these
attributes; some values may be None if they could not be
determined.
``modname``:
the name of the module
``filename``:
the filename of the module
``lineno``:
the line of the error
``revision``:
the contents of __version__ or __revision__
``name``:
the function name
``supplement``:
an object created from ``__traceback_supplement__``
``supplement_exception``:
a simple traceback of any exception ``__traceback_supplement__``
created
``traceback_info``:
the str() of any ``__traceback_info__`` variable found in the local
scope (@@: should it str()-ify it or not?)
``traceback_hide``:
the value of any ``__traceback_hide__`` variable
``traceback_log``:
the value of any ``__traceback_log__`` variable
``__traceback_supplement__`` is thrown away, but a fixed
set of attributes is captured; each of these attributes is
optional.
``object``:
the name of the object being visited
``source_url``:
the original URL requested
``line``:
the line of source being executed (for interpreters, like ZPT)
``column``:
the column of source being executed
``expression``:
the expression being evaluated (also for interpreters)
``warnings``:
a list of (string) warnings to be displayed
``getInfo``:
a function/method that takes no arguments, and returns a string
describing any extra information
``extraData``:
a function/method that takes no arguments, and returns a
dictionary. The contents of this dictionary will not be
displayed in the context of the traceback, but globally for
the exception. Results will be grouped by the keys in the
dictionaries (which also serve as titles). The keys can also
be tuples of (importance, title); in this case the importance
should be ``important`` (shows up at top), ``normal`` (shows
up somewhere; unspecified), ``supplemental`` (shows up at
bottom), or ``extra`` (shows up hidden or not at all).
These are used to create an object with attributes of the same
names (``getInfo`` becomes a string attribute, not a method).
``__traceback_supplement__`` implementations should be careful to
produce values that are relatively static and unlikely to cause
further errors in the reporting system -- any complex
introspection should go in ``getInfo()`` and should ultimately
return a string.
Note that all attributes are optional, and under certain
circumstances may be None or may not exist at all -- the collector
can only do a best effort, but must avoid creating any exceptions
itself.
Formatters may want to use ``__traceback_hide__`` as a hint to
hide frames that are part of the 'framework' or underlying system.
There are a variety of rules about special values for this
variable that formatters should be aware of.
TODO:
More attributes in __traceback_supplement__? Maybe an attribute
that gives a list of local variables that should also be
collected? Also, attributes that would be explicitly meant for
the entire request, not just a single frame. Right now some of
the fixed set of attributes (e.g., source_url) are meant for this
use, but there's no explicit way for the supplement to indicate
new values, e.g., logged-in user, HTTP referrer, environment, etc.
Also, the attributes that do exist are Zope/Web oriented.
More information on frames? cgitb, for instance, produces
extensive information on local variables. There exists the
possibility that getting this information may cause side effects,
which can make debugging more difficult; but it also provides
fodder for post-mortem debugging. However, the collector is not
meant to be configurable, but to capture everything it can and let
the formatters be configurable. Maybe this would have to be a
configuration value, or maybe it could be indicated by another
magical variable (which would probably mean 'show all local
variables below this frame')
"""
show_revisions = 0
def __init__(self, limit=None):
self.limit = limit
def getLimit(self):
limit = self.limit
if limit is None:
limit = getattr(sys, 'tracebacklimit', None)
return limit
def getRevision(self, globals):
if not self.show_revisions:
return None
revision = globals.get('__revision__', None)
if revision is None:
# Incorrect but commonly used spelling
revision = globals.get('__version__', None)
if revision is not None:
try:
revision = str(revision).strip()
except:
revision = '???'
return revision
def collectSupplement(self, supplement, tb):
result = {}
for name in ('object', 'source_url', 'line', 'column',
'expression', 'warnings'):
result[name] = getattr(supplement, name, None)
func = getattr(supplement, 'getInfo', None)
if func:
result['info'] = func()
else:
result['info'] = None
func = getattr(supplement, 'extraData', None)
if func:
result['extra'] = func()
else:
result['extra'] = None
return SupplementaryData(**result)
def collectLine(self, tb, extra_data):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
globals = f.f_globals
locals = f.f_locals
if not hasattr(locals, 'has_key'):
# Something weird about this frame; it's not a real dict
warnings.warn(
"Frame %s has an invalid locals(): %r" % (
globals.get('__name__', 'unknown'), locals))
locals = {}
data = {}
data['modname'] = globals.get('__name__', None)
data['filename'] = filename
data['lineno'] = lineno
data['revision'] = self.getRevision(globals)
data['name'] = name
data['tbid'] = id(tb)
# Output a traceback supplement, if any.
if '__traceback_supplement__' in locals:
# Use the supplement defined in the function.
tbs = locals['__traceback_supplement__']
elif '__traceback_supplement__' in globals:
# Use the supplement defined in the module.
# This is used by Scripts (Python).
tbs = globals['__traceback_supplement__']
else:
tbs = None
if tbs is not None:
factory = tbs[0]
args = tbs[1:]
try:
supp = factory(*args)
data['supplement'] = self.collectSupplement(supp, tb)
if data['supplement'].extra:
for key, value in data['supplement'].extra.items():
extra_data.setdefault(key, []).append(value)
except:
if DEBUG_EXCEPTION_FORMATTER:
out = StringIO()
traceback.print_exc(file=out)
text = out.getvalue()
data['supplement_exception'] = text
# else just swallow the exception.
try:
tbi = locals.get('__traceback_info__', None)
if tbi is not None:
data['traceback_info'] = str(tbi)
except:
pass
marker = []
for name in ('__traceback_hide__', '__traceback_log__',
'__traceback_decorator__'):
try:
tbh = locals.get(name, globals.get(name, marker))
if tbh is not marker:
data[name[2:-2]] = tbh
except:
pass
return data
def collectExceptionOnly(self, etype, value):
return traceback.format_exception_only(etype, value)
def collectException(self, etype, value, tb, limit=None):
# The next line provides a way to detect recursion.
__exception_formatter__ = 1
frames = []
ident_data = []
traceback_decorators = []
if limit is None:
limit = self.getLimit()
n = 0
extra_data = {}
while tb is not None and (limit is None or n < limit):
if tb.tb_frame.f_locals.get('__exception_formatter__'):
# Stop recursion. @@: should make a fake ExceptionFrame
frames.append('(Recursive formatException() stopped)\n')
break
data = self.collectLine(tb, extra_data)
frame = ExceptionFrame(**data)
frames.append(frame)
if frame.traceback_decorator is not None:
traceback_decorators.append(frame.traceback_decorator)
ident_data.append(frame.modname or '?')
ident_data.append(frame.name or '?')
tb = tb.tb_next
n = n + 1
ident_data.append(str(etype))
ident = serial_number_generator.hash_identifier(
' '.join(ident_data), length=5, upper=True,
prefix=DEBUG_IDENT_PREFIX)
result = CollectedException(
frames=frames,
exception_formatted=self.collectExceptionOnly(etype, value),
exception_type=etype,
exception_value=self.safeStr(value),
identification_code=ident,
date=time.localtime(),
extra_data=extra_data)
if etype is ImportError:
extra_data[('important', 'sys.path')] = [sys.path]
for decorator in traceback_decorators:
try:
new_result = decorator(result)
if new_result is not None:
result = new_result
except:
pass
return result
def safeStr(self, obj):
try:
return str(obj)
except UnicodeEncodeError:
try:
return unicode(obj).encode(FALLBACK_ENCODING, 'replace')
except UnicodeEncodeError:
# This is when something is really messed up, but this can
# happen when the __str__ of an object has to handle unicode
return repr(obj)
limit = 200
class Bunch(object):
"""
A generic container
"""
def __init__(self, **attrs):
for name, value in attrs.items():
setattr(self, name, value)
def __repr__(self):
name = '<%s ' % self.__class__.__name__
name += ' '.join(['%s=%r' % (name, str(value)[:30])
for name, value in self.__dict__.items()
if not name.startswith('_')])
return name + '>'
class CollectedException(Bunch):
"""
This is the result of collecting the exception; it contains copies
of data of interest.
"""
# A list of frames (ExceptionFrame instances), innermost last:
frames = []
# The result of traceback.format_exception_only; this looks
# like a normal traceback you'd see in the interactive interpreter
exception_formatted = None
# The *string* representation of the type of the exception
# (@@: should we give the actual class? -- we can't keep the
# actual exception around, but the class should be safe)
# Something like 'ValueError'
exception_type = None
# The string representation of the exception, from ``str(e)``.
exception_value = None
# An identifier which should more-or-less classify this particular
# exception, including where in the code it happened.
identification_code = None
# The date, as time.localtime() returns:
date = None
# A dictionary of supplemental data:
extra_data = {}
class SupplementaryData(Bunch):
"""
The result of __traceback_supplement__. We don't keep the
supplement object around, for fear of GC problems and whatnot.
(@@: Maybe I'm being too superstitious about copying only specific
information over)
"""
# These attributes are copied from the object, or left as None
# if the object doesn't have these attributes:
object = None
source_url = None
line = None
column = None
expression = None
warnings = None
# This is the *return value* of supplement.getInfo():
info = None
class ExceptionFrame(Bunch):
"""
This represents one frame of the exception. Each frame is a
context in the call stack, typically represented by a line
number and module name in the traceback.
"""
# The name of the module; can be None, especially when the code
# isn't associated with a module.
modname = None
# The filename (@@: when no filename, is it None or '?'?)
filename = None
# Line number
lineno = None
# The value of __revision__ or __version__ -- but only if
# show_revision = True (by default it is false). (@@: Why not
# collect this?)
revision = None
# The name of the function with the error (@@: None or '?' when
# unknown?)
name = None
# A SupplementaryData object, if __traceback_supplement__ was found
# (and produced no errors)
supplement = None
# If accessing __traceback_supplement__ causes any error, the
# plain-text traceback is stored here
supplement_exception = None
# The str() of any __traceback_info__ value found
traceback_info = None
# The value of __traceback_hide__
traceback_hide = False
# The value of __traceback_decorator__
traceback_decorator = None
# The id() of the traceback scope, can be used to reference the
# scope for use elsewhere
tbid = None
def get_source_line(self, context=0):
"""
Return the source of the current line of this frame. You
probably want to .strip() it as well, as it is likely to have
leading whitespace.
If context is given, then that many lines on either side will
also be returned. E.g., context=1 will give 3 lines.
"""
if not self.filename or not self.lineno:
return None
lines = []
for lineno in range(self.lineno-context, self.lineno+context+1):
lines.append(linecache.getline(self.filename, lineno))
return ''.join(lines)
if hasattr(sys, 'tracebacklimit'):
limit = min(limit, sys.tracebacklimit)
col = ExceptionCollector()
def collect_exception(t, v, tb, limit=None):
"""
Collect an exception from ``sys.exc_info()``.
Use like::
try:
blah blah
except:
exc_data = collect_exception(*sys.exc_info())
"""
return col.collectException(t, v, tb, limit=limit)
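# Minimal usage sketch (not part of the original module): collect the current
# exception and print a short summary of every captured frame. Assumes the
# helpers defined above are importable from this module.
if __name__ == '__main__':  # pragma: no cover
    try:
        raise ValueError('boom')
    except Exception:
        exc_data = collect_exception(*sys.exc_info())
        # exception_formatted is the list returned by format_exception_only()
        print(''.join(exc_data.exception_formatted))
        for frame in exc_data.frames:
            print('%s:%s in %s()' % (frame.filename, frame.lineno, frame.name))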
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Admin model views for PersistentIdentifier."""
import uuid
from flask import current_app, flash, url_for
from flask_admin.actions import action
from flask_admin.contrib.sqla import ModelView
from flask_wtf import FlaskForm
from invenio_admin.filters import FilterConverter
from invenio_admin.forms import LazyChoices
from markupsafe import Markup
from wtforms.validators import ValidationError
from .models import Bucket, FileInstance, Location, MultipartObject, \
ObjectVersion, slug_pattern
from .tasks import verify_checksum
def _(x):
"""Identity function for string extraction."""
return x
def require_slug(form, field):
"""Validate location name."""
if not slug_pattern.match(field.data):
raise ValidationError(_("Invalid location name."))
def link(text, link_func):
"""Generate a object formatter for links.."""
def object_formatter(v, c, m, p):
"""Format object view link."""
return Markup('<a href="{0}">{1}</a>'.format(
link_func(m), text))
return object_formatter
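# Note: Flask-Admin invokes column formatters as formatter(view, context,
# model, name); the terse parameters (v, c, m, p) above follow that signature,
# and only the model (m) is used to build the link.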
class LocationModelView(ModelView):
"""ModelView for the locations."""
filter_converter = FilterConverter()
can_create = True
can_edit = False
can_delete = True
can_view_details = True
column_formatters = dict(
buckets=link('Buckets', lambda o: url_for(
'bucket.index_view', flt2_2=o.name))
)
column_details_list = (
'name', 'uri', 'default', 'created', 'updated', 'buckets')
column_list = ('name', 'uri', 'default', 'created', 'updated', 'buckets')
column_labels = dict(
id=_('ID'),
uri=_('URI'),
)
column_filters = ('default', 'created', 'updated', )
column_searchable_list = ('uri', 'name')
column_default_sort = 'name'
form_base_class = FlaskForm
form_columns = ('name', 'uri', 'default')
form_args = dict(
name=dict(validators=[require_slug])
)
page_size = 25
class BucketModelView(ModelView):
"""ModelView for the buckets."""
filter_converter = FilterConverter()
can_create = True
can_delete = False
can_edit = True
can_view_details = True
column_formatters = dict(
objects=link('Objects', lambda o: url_for(
'objectversion.index_view', flt0_0=o.id, flt1_37=1, sort=1)),
object_versions=link('Object Versions', lambda o: url_for(
'objectversion.index_view', flt0_0=o.id, flt1_29=1, sort=1)),
)
column_details_list = (
'id', 'location', 'default_storage_class', 'deleted', 'locked', 'size',
'quota_size', 'max_file_size', 'created', 'updated', 'objects',
'object_versions',
)
column_list = (
'id', 'location', 'default_storage_class', 'deleted', 'locked', 'size',
'quota_size', 'created', 'updated', 'objects',
)
column_labels = dict(
id=_('UUID'),
default_location=_('Location'),
default_storage_class=_('Storage Class'),
)
column_filters = (
# Change of order affects Location.column_formatters!
'location.name', 'default_storage_class', 'deleted', 'locked', 'size',
'created', 'updated',
)
column_default_sort = ('updated', True)
form_base_class = FlaskForm
form_columns = (
'default_storage_class', 'locked', 'deleted', 'quota_size',
'max_file_size'
)
form_choices = dict(
default_storage_class=LazyChoices(lambda: current_app.config[
'FILES_REST_STORAGE_CLASS_LIST'].items()))
page_size = 25
class ObjectModelView(ModelView):
"""ModelView for the objects."""
filter_converter = FilterConverter()
can_create = False
can_edit = False
can_delete = False
can_view_details = True
column_formatters = dict(
file_instance=link('File', lambda o: url_for(
'fileinstance.index_view', flt0_0=o.file_id)),
versions=link('Versions', lambda o: url_for(
'objectversion.index_view',
sort=7, desc=1, flt0_0=o.bucket_id, flt1_29=o.key)),
bucket_objs=link('Objects', lambda o: url_for(
'objectversion.index_view',
flt0_0=o.bucket_id, flt1_37=1, sort=1)),
)
column_labels = {
'version_id': _('Version'),
'file_id': _('File UUID'),
'file.uri': _('URI'),
'file.size': _('Size'),
'is_deleted': _('Deleted'),
'file.checksum': _('Checksum'),
'file.readable': _('Readable'),
'file.storage_class': _('Storage class'),
'bucket_objs': _("Objects"),
'file_instance': _("File"),
}
column_list = (
'bucket', 'key', 'version_id', 'file.uri', 'is_head', 'is_deleted',
'file.size', 'created', 'updated', 'versions', 'bucket_objs',
'file_instance')
column_searchable_list = ('key', )
column_details_list = (
'bucket', 'key', 'version_id', 'file_id', 'file.uri', 'file.checksum',
'file.size', 'file.storage_class', 'is_head', 'is_deleted', 'created',
'updated', 'file_instance', 'versions')
column_filters = (
# Order affects column_formatters in other model views.
'bucket.id', 'bucket.locked', 'bucket.deleted', 'bucket.location.name',
'file_id', 'file.checksum', 'file.storage_class', 'file.readable',
'key', 'version_id', 'is_head', 'file.size', 'created', 'updated', )
column_sortable_list = (
'key', 'file.uri', 'is_head', 'file.size', 'created', 'updated')
column_default_sort = ('updated', True)
page_size = 25
class FileInstanceModelView(ModelView):
"""ModelView for the objects."""
filter_converter = FilterConverter()
can_create = False
can_edit = False
can_delete = False
can_view_details = True
column_formatters = dict(
objects=link('Objects', lambda o: url_for(
'objectversion.index_view', flt3_12=o.id)),
)
column_labels = dict(
id=_('ID'),
uri=_('URI'),
last_check=_('Fixity'),
last_check_at=_('Checked'),
)
column_list = (
'id', 'uri', 'storage_class', 'size', 'checksum', 'readable',
'writable', 'last_check', 'last_check_at', 'created', 'updated',
'objects')
column_searchable_list = ('uri', 'size', 'checksum', )
column_details_list = (
'id', 'uri', 'storage_class', 'size', 'checksum', 'readable',
'writable', 'last_check', 'last_check_at', 'created', 'updated',
'objects')
column_filters = (
'id', 'uri', 'storage_class', 'size', 'checksum', 'readable',
'writable', 'last_check', 'last_check_at', 'created', 'updated',
'objects')
column_default_sort = ('updated', True)
page_size = 25
@action('verify_checksum', _('Run fixity check'))
def action_verify_checksum(self, ids):
"""Inactivate users."""
try:
count = 0
for file_id in ids:
f = FileInstance.query.filter_by(
id=uuid.UUID(file_id)).one_or_none()
if f is None:
raise ValueError(_("Cannot find file instance."))
verify_checksum.delay(file_id)
count += 1
if count > 0:
flash(_('Fixity check(s) sent to queue.'), 'success')
except Exception as exc:
if not self.handle_view_exception(exc):
raise
current_app.logger.exception(str(exc)) # pragma: no cover
flash(_('Failed to run fixity checks.'),
'error') # pragma: no cover
class MultipartObjectModelView(ModelView):
"""ModelView for the objects."""
filter_converter = FilterConverter()
can_create = False
can_edit = False
can_delete = False
can_view_details = True
column_formatters = dict(
file_instance=link('File', lambda o: url_for(
'fileinstance.index_view', flt0_0=o.file_id)),
)
column_labels = dict(
id=_('ID'),
completed=_('Complete'),
file_instance=_('File'),
)
column_list = (
'upload_id', 'completed', 'created', 'updated', 'file_instance', )
column_details_list = (
'upload_id', 'completed', 'created', 'updated', 'file_instance', )
column_filters = (
'upload_id', 'completed', 'created', 'updated', )
column_default_sort = ('upload_id', True)
page_size = 25
location_adminview = dict(
modelview=LocationModelView,
model=Location,
category=_('Files'))
bucket_adminview = dict(
modelview=BucketModelView,
model=Bucket,
category=_('Files'))
object_adminview = dict(
modelview=ObjectModelView,
model=ObjectVersion,
category=_('Files'))
fileinstance_adminview = dict(
modelview=FileInstanceModelView,
model=FileInstance,
category=_('Files'))
multipartobject_adminview = dict(
modelview=MultipartObjectModelView,
model=MultipartObject,
category=_('Files'))
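# Registration sketch (not part of this module): Invenio-Admin discovers dicts
# like the ones above via the 'invenio_admin.views' entry point group, roughly:
#
#     entry_points={
#         'invenio_admin.views': [
#             'files_location = invenio_files_rest.admin:location_adminview',
#             'files_bucket = invenio_files_rest.admin:bucket_adminview',
#             'files_object = invenio_files_rest.admin:object_adminview',
#         ],
#     }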
|
|
#!/usr/bin/env python
# external
from sympy import sin, cos, symbols
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
RigidBody, Particle, inertia)
from numpy import radians
from numpy.testing import assert_allclose
# local
from ..shapes import Cylinder
from ..visualization_frame import VisualizationFrame
from ..camera import PerspectiveCamera, OrthoGraphicCamera
from ..scene import Scene
from ..light import PointLight
class TestVisualizationFrameScene(object):
def __init__(self):
# We define some quantities required for the tests here.
self.p = dynamicsymbols('p:3')
self.q = dynamicsymbols('q:3')
self.dynamic = list(self.p) + list(self.q)
self.states = [radians(45) for x in self.p] + \
[radians(30) for x in self.q]
self.I = ReferenceFrame('I')
self.A = self.I.orientnew('A', 'space', self.p, 'XYZ')
self.B = self.A.orientnew('B', 'space', self.q, 'XYZ')
self.O = Point('O')
self.P1 = self.O.locatenew('P1', 10 * self.I.x + \
10 * self.I.y + 10 * self.I.z)
self.P2 = self.P1.locatenew('P2', 10 * self.I.x + \
10 * self.I.y + 10 * self.I.z)
self.point_list1 = [[2, 3, 1], [4, 6, 2], [5, 3, 1], [5, 3, 6]]
self.point_list2 = [[3, 1, 4], [3, 8, 2], [2, 1, 6], [2, 1, 1]]
self.shape1 = Cylinder(1.0, 1.0)
self.shape2 = Cylinder(1.0, 1.0)
self.Ixx, self.Iyy, self.Izz = symbols('Ixx Iyy Izz')
self.mass = symbols('mass')
self.parameters = [self.Ixx, self.Iyy, self.Izz, self.mass]
self.param_vals = [0, 0, 0, 0]
self.inertia = inertia(self.A, self.Ixx, self.Iyy, self.Izz)
self.rigid_body = RigidBody('rigid_body1', self.P1, self.A, \
self.mass, (self.inertia, self.P1))
self.global_frame1 = VisualizationFrame('global_frame1', \
self.A, self.P1, self.shape1)
self.global_frame2 = VisualizationFrame('global_frame2', \
self.B, self.P2, self.shape2)
self.scene1 = Scene(self.I, self.O, \
(self.global_frame1, self.global_frame2), \
name='scene')
self.particle = Particle('particle1', self.P1, self.mass)
#To make it more readable
p = self.p
q = self.q
#Here is the dragon ..
self.transformation_matrix = \
[[cos(p[1])*cos(p[2]), sin(p[2])*cos(p[1]), -sin(p[1]), 0], \
[sin(p[0])*sin(p[1])*cos(p[2]) - sin(p[2])*cos(p[0]), \
sin(p[0])*sin(p[1])*sin(p[2]) + cos(p[0])*cos(p[2]), \
sin(p[0])*cos(p[1]), 0], \
[sin(p[0])*sin(p[2]) + sin(p[1])*cos(p[0])*cos(p[2]), \
-sin(p[0])*cos(p[2]) + sin(p[1])*sin(p[2])*cos(p[0]), \
cos(p[0])*cos(p[1]), 0], \
[10, 10, 10, 1]]
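# The matrix above is the expected 4x4 homogeneous transform returned by
# generate_transformation_matrix(I, O) for frame A (a space-fixed XYZ
# rotation through the p coordinates), with P1's translation (10, 10, 10)
# stored in the last row.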
def test_vframe_with_rframe(self):
self.frame1 = VisualizationFrame('frame1', self.I, self.O, \
self.shape1)
assert self.frame1.name == 'frame1'
assert self.frame1.reference_frame == self.I
assert self.frame1.origin == self.O
assert self.frame1.shape is self.shape1
self.frame1.name = 'frame1_'
assert self.frame1.name == 'frame1_'
self.frame1.reference_frame = self.A
assert self.frame1.reference_frame == self.A
self.frame1.origin = self.P1
assert self.frame1.origin == self.P1
self.frame1.shape = self.shape2
assert self.frame1.shape is self.shape2
assert self.frame1.generate_transformation_matrix(self.I, self.O).tolist() == \
self.transformation_matrix
def test_vframe_with_rbody(self):
self.frame2 = VisualizationFrame('frame2', self.rigid_body, \
self.shape1)
assert self.frame2.name == 'frame2'
assert self.frame2.reference_frame == self.A
assert self.frame2.origin == self.P1
assert self.frame2.shape == self.shape1
self.frame2.name = 'frame2_'
assert self.frame2.name == 'frame2_'
self.frame2.reference_frame = self.B
assert self.frame2.reference_frame == self.B
self.frame2.origin = self.P2
assert self.frame2.origin == self.P2
self.frame2.shape = self.shape2
assert self.frame2.shape is self.shape2
self.frame2.reference_frame = self.A
self.frame2.origin = self.P1
assert self.frame2.generate_transformation_matrix(self.I, self.O).tolist() == \
self.transformation_matrix
def test_vframe_with_particle(self):
self.frame3 = VisualizationFrame('frame3', \
self.A, self.particle, \
self.shape1)
assert self.frame3.name == 'frame3'
assert self.frame3.reference_frame == self.A
assert self.frame3.origin == self.P1
assert self.frame3.shape is self.shape1
self.frame3.name = 'frame3_'
assert self.frame3.name == 'frame3_'
self.frame3.reference_frame = self.B
assert self.frame3.reference_frame == self.B
self.frame3.origin = self.P2
assert self.frame3.origin == self.P2
self.frame3.shape = self.shape2
assert self.frame3.shape is self.shape2
self.frame3.reference_frame = self.A
self.frame3.origin = self.P1
assert self.frame3.generate_transformation_matrix(self.I, self.O).tolist() == \
self.transformation_matrix
def test_vframe_without_name(self):
self.frame4 = VisualizationFrame(self.I, self.O, \
self.shape1)
assert self.frame4.name == 'unnamed'
# To check if reference_frame and origin are defined
# properly without the name arg
assert self.frame4.reference_frame == self.I
assert self.frame4.origin == self.O
assert self.frame4.shape is self.shape1
self.frame4.name = 'frame1_'
assert self.frame4.name == 'frame1_'
def test_numeric_transform(self):
self.list1 = [[0.5000000000000001, 0.5, \
-0.7071067811865475, 0.0, \
-0.14644660940672627, 0.8535533905932737, \
0.5, 0.0, \
0.8535533905932737, -0.14644660940672627, \
0.5000000000000001, 0.0, \
10.0, 10.0, 10.0, 1.0]]
self.list2 = [[-0.11518993731879767, 0.8178227645734215, \
-0.563823734943801, 0.0, \
0.1332055011661179, 0.5751927992738988, \
0.8070994598700584, 0.0, \
0.984371663956036, 0.017865313009926137, \
-0.17519491371464685, 0.0, \
20.0, 20.0, 20.0, 1.0]]
self.global_frame1.generate_transformation_matrix(self.I, self.O)
self.global_frame1.generate_numeric_transform_function(self.dynamic, \
self.parameters)
assert_allclose(self.global_frame1.\
evaluate_transformation_matrix(self.states, \
self.param_vals), self.list1)
self.global_frame2.generate_transformation_matrix(self.I, self.O)
self.global_frame2.generate_numeric_transform_function(self.dynamic, \
self.parameters)
assert_allclose(self.global_frame2.\
evaluate_transformation_matrix(self.states, \
self.param_vals), self.list2)
def test_perspective_camera(self):
# Camera is a subclass of VisualizationFrame, but without any specific
# shape attached. We supply only a ReferenceFrame and a Point to the
# camera, and it inherits methods from VisualizationFrame.
# Testing with rigid body ..
camera = PerspectiveCamera('camera', self.rigid_body, fov=45, \
near=1, far=1000)
assert camera.name == 'camera'
assert camera.reference_frame == self.A
assert camera.origin == self.P1
assert camera.fov == 45
assert camera.near == 1
assert camera.far == 1000
#Testing with reference_frame, particle ..
camera = PerspectiveCamera('camera', self.I, self.particle, \
fov=45, near=1, far=1000)
assert camera.name == 'camera'
assert camera.reference_frame == self.I
assert camera.origin == self.P1
assert camera.fov == 45
assert camera.near == 1
assert camera.far == 1000
#Testing with reference_frame, point ..
camera = PerspectiveCamera('camera', self.I, self.O, \
fov=45, near=1, far=1000)
assert camera.name == 'camera'
assert camera.reference_frame == self.I
assert camera.origin == self.O
assert camera.fov == 45
assert camera.near == 1
assert camera.far == 1000
camera.name = 'camera1'
assert camera.name == 'camera1'
assert camera.__str__() == 'PerspectiveCamera: camera1'
assert camera.__repr__() == 'PerspectiveCamera'
camera.reference_frame = self.A
assert camera.reference_frame == self.A
camera.origin = self.P1
assert camera.origin == self.P1
camera.fov = 30
assert camera.fov == 30
camera.near = 10
assert camera.near == 10
camera.far = 500
assert camera.far == 500
#Test unnamed
camera1 = PerspectiveCamera(self.I, self.O)
assert camera1.name == 'unnamed'
assert camera1.reference_frame == self.I
assert camera1.origin == self.O
assert camera1.fov == 45
assert camera1.near == 1
assert camera1.far == 1000
def test_orthographic_camera(self):
# Compared to a PerspectiveCamera, an OrthoGraphicCamera doesn't have a
# fov; instead the left, right, top and bottom faces are adjusted by the
# Scene width and height.
#Testing with rigid_body
camera = OrthoGraphicCamera('camera', self.rigid_body, \
near=1, far=1000)
assert camera.name == 'camera'
assert camera.reference_frame == self.A
assert camera.origin == self.P1
assert camera.near == 1
assert camera.far == 1000
#Testing with reference_frame, particle
camera = OrthoGraphicCamera('camera', self.I, self.particle, \
near=1, far=1000)
assert camera.name == 'camera'
assert camera.reference_frame == self.I
assert camera.origin == self.P1
assert camera.near == 1
assert camera.far == 1000
#Testing with reference_frame, point ...
camera = OrthoGraphicCamera('camera', self.I, self.O, near=1, \
far=1000)
assert camera.name == 'camera'
assert camera.reference_frame == self.I
assert camera.origin == self.O
assert camera.near == 1
assert camera.far == 1000
camera.name = 'camera1'
assert camera.name == 'camera1'
assert camera.__str__() == 'OrthoGraphicCamera: camera1'
assert camera.__repr__() == 'OrthoGraphicCamera'
camera.reference_frame = self.A
assert camera.reference_frame == self.A
camera.origin = self.P1
assert camera.origin == self.P1
camera.near = 10
assert camera.near == 10
camera.far = 500
assert camera.far == 500
camera1 = OrthoGraphicCamera(self.I, self.O)
assert camera1.name == 'unnamed'
assert camera1.reference_frame == self.I
assert camera1.origin == self.O
assert camera1.near == 1
assert camera1.far == 1000
def test_point_light(self):
#Testing with rigid-body ..
light = PointLight('light', self.rigid_body, color='blue')
assert light.name == 'light'
assert light.reference_frame == self.A
assert light.origin == self.P1
assert light.color == 'blue'
#Testing with reference_frame, particle ..
light = PointLight('light', self.I, self.particle, color='blue')
assert light.name == 'light'
assert light.reference_frame == self.I
assert light.origin == self.P1
assert light.color == 'blue'
#Testing with reference_frame, point ..
light = PointLight('light', self.I, self.O, color='blue')
assert light.name == 'light'
assert light.reference_frame == self.I
assert light.origin == self.O
assert light.color == 'blue'
light.name = 'light1'
assert light.name == 'light1'
assert light.__str__() == 'PointLight: light1'
assert light.__repr__() == 'PointLight'
light.reference_frame = self.A
assert light.reference_frame == self.A
light.origin = self.P1
assert light.origin == self.P1
light.color = 'red'
assert light.color == 'red'
#Test unnamed
light1 = PointLight(self.I, self.O)
assert light1.name == 'unnamed'
assert light1.reference_frame == self.I
assert light1.origin == self.O
assert light1.color == 'white'
def test_scene_init(self):
self.scene2 = Scene(self.I, self.O, \
self.global_frame1, self.global_frame2, \
name='scene')
assert self.scene2.name == 'scene'
assert self.scene2.reference_frame == self.I
assert self.scene2.origin == self.O
assert self.scene2.visualization_frames[0] is self.global_frame1
assert self.scene2.visualization_frames[1] is self.global_frame2
self.scene2.name = 'scene1'
assert self.scene2.name == 'scene1'
self.scene2.reference_frame = self.A
assert self.scene2.reference_frame == self.A
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
from __future__ import absolute_import
import logging as std_logging
import os
import warnings
import fixtures
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from oslo_messaging import conffixture as messaging_conffixture
import six
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.objects import base as obj_base
from nova import rpc
from nova import service
from nova.tests.functional.api import client
_TRUE_VALUES = ('True', 'true', '1', 'yes')
CONF = cfg.CONF
DB_SCHEMA = {'main': "", 'api': ""}
SESSION_CONFIGURED = False
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
name = name
# If not otherwise specified, the host will default to the
# name of the service. Some things like aggregates care that
# this is stable.
host = host or name
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'compute-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class NullHandler(std_logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class StandardLogging(fixtures.Fixture):
"""Setup Logging redirection for tests.
There are a number of things we want to handle with logging in tests:
* Redirect the logging to somewhere that we can test or dump it later.
* Ensure that as many DEBUG messages as possible are actually
executed, to ensure they are actually syntactically valid (they
often have not been).
* Ensure that we create useful output for tests that doesn't
overwhelm the testing system (which means we can't capture the
100 MB of debug logging on every run).
To do this we create a logger fixture at the root level, which
defaults to INFO and create a Null Logger at DEBUG which lets
us execute log messages at DEBUG but not keep the output.
To support local debugging OS_DEBUG=True can be set in the
environment, which will print out the full debug logging.
There are also a set of overrides for particularly verbose
modules to be even less than INFO.
"""
def setUp(self):
super(StandardLogging, self).setUp()
# set root logger to debug
root = std_logging.getLogger()
root.setLevel(std_logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = std_logging.DEBUG
else:
level = std_logging.INFO
# Collect logs
fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
self.logger = self.useFixture(
fixtures.FakeLogger(format=fs, level=None))
# TODO(sdague): why can't we send level through the fake
# logger? Tests prove that it breaks, but it's worth getting
# to the bottom of.
root.handlers[0].setLevel(level)
if level > std_logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(std_logging.DEBUG)
# Don't log every single DB migration step
std_logging.getLogger(
'migrate.versioning.api').setLevel(std_logging.WARNING)
class OutputStreamCapture(fixtures.Fixture):
"""Capture output streams during tests.
This fixture captures errant printing to stderr / stdout during
the tests and lets us see those streams at the end of the test
runs instead. Useful to see what was happening during failed
tests.
"""
def setUp(self):
super(OutputStreamCapture, self).setUp()
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
self.out = self.useFixture(fixtures.StringStream('stdout'))
self.useFixture(
fixtures.MonkeyPatch('sys.stdout', self.out.stream))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
self.err = self.useFixture(fixtures.StringStream('stderr'))
self.useFixture(
fixtures.MonkeyPatch('sys.stderr', self.err.stream))
@property
def stderr(self):
return self.err._details["stderr"].as_text()
@property
def stdout(self):
return self.out._details["stdout"].as_text()
class Timeout(fixtures.Fixture):
"""Setup per test timeouts.
In order to avoid test deadlocks we support setting up a test
timeout parameter read from the environment. In almost all
cases where the timeout is reached this means a deadlock.
A class level TIMEOUT_SCALING_FACTOR also exists, which allows
extremely long tests to specify they need more time.
"""
def __init__(self, timeout, scaling=1):
super(Timeout, self).__init__()
try:
self.test_timeout = int(timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
self.test_timeout = 0
if scaling >= 1:
self.test_timeout *= scaling
else:
raise ValueError('scaling value must be >= 1')
def setUp(self):
super(Timeout, self).setUp()
if self.test_timeout > 0:
self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
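# Typical usage sketch (the OS_TEST_TIMEOUT convention is assumed here and is
# not defined by this module):
#
#     test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
#     self.useFixture(Timeout(test_timeout, scaling=self.TIMEOUT_SCALING_FACTOR))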
class Database(fixtures.Fixture):
def __init__(self, database='main', connection=None):
"""Create a database fixture.
:param database: The type of database, 'main' or 'api'
:param connection: The connection string to use
"""
super(Database, self).__init__()
# NOTE(pkholkin): oslo_db.enginefacade is configured in tests the same
# way as it is done for any other service that uses db
global SESSION_CONFIGURED
if not SESSION_CONFIGURED:
session.configure(CONF)
SESSION_CONFIGURED = True
self.database = database
if database == 'main':
if connection is not None:
ctxt_mgr = session.create_context_manager(
connection=connection)
facade = ctxt_mgr.get_legacy_facade()
self.get_engine = facade.get_engine
else:
self.get_engine = session.get_engine
elif database == 'api':
self.get_engine = session.get_api_engine
def _cache_schema(self):
global DB_SCHEMA
if not DB_SCHEMA[self.database]:
engine = self.get_engine()
conn = engine.connect()
migration.db_sync(database=self.database)
DB_SCHEMA[self.database] = "".join(line for line
in conn.connection.iterdump())
engine.dispose()
def cleanup(self):
engine = self.get_engine()
engine.dispose()
def reset(self):
self._cache_schema()
engine = self.get_engine()
engine.dispose()
conn = engine.connect()
conn.connection.executescript(DB_SCHEMA[self.database])
def setUp(self):
super(Database, self).setUp()
self.reset()
self.addCleanup(self.cleanup)
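# Typical usage sketch in a test case's setUp (illustrative only):
#
#     self.useFixture(Database(database='main'))
#     self.useFixture(Database(database='api'))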
class DatabaseAtVersion(fixtures.Fixture):
def __init__(self, version, database='main'):
"""Create a database fixture.
:param version: Max version to sync to (or None for current)
:param database: The type of database, 'main' or 'api'
"""
super(DatabaseAtVersion, self).__init__()
self.database = database
self.version = version
if database == 'main':
self.get_engine = session.get_engine
elif database == 'api':
self.get_engine = session.get_api_engine
def cleanup(self):
engine = self.get_engine()
engine.dispose()
def reset(self):
engine = self.get_engine()
engine.dispose()
engine.connect()
migration.db_sync(version=self.version, database=self.database)
def setUp(self):
super(DatabaseAtVersion, self).setUp()
self.reset()
self.addCleanup(self.cleanup)
class RPCFixture(fixtures.Fixture):
def __init__(self, *exmods):
super(RPCFixture, self).__init__()
self.exmods = []
self.exmods.extend(exmods)
def setUp(self):
super(RPCFixture, self).setUp()
self.addCleanup(rpc.cleanup)
rpc.add_extra_exmods(*self.exmods)
self.addCleanup(rpc.clear_extra_exmods)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.useFixture(self.messaging_conf)
rpc.init(CONF)
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
super(WarningsFixture, self).setUp()
# NOTE(sdague): Make deprecation warnings only happen once. Otherwise
# this gets kind of crazy given the way that upstream python libs use
# this.
warnings.simplefilter("once", DeprecationWarning)
warnings.filterwarnings('ignore',
message='With-statements now directly support'
' multiple context managers')
self.addCleanup(warnings.resetwarnings)
class ConfPatcher(fixtures.Fixture):
"""Fixture to patch and restore global CONF.
This also resets overrides for everything that is patched during
its teardown.
"""
def __init__(self, **kwargs):
"""Constructor
:params group: if specified all config options apply to that group.
:params **kwargs: the rest of the kwargs are processed as a
set of key/value pairs to be set as configuration override.
"""
super(ConfPatcher, self).__init__()
self.group = kwargs.pop('group', None)
self.args = kwargs
def setUp(self):
super(ConfPatcher, self).setUp()
for k, v in six.iteritems(self.args):
self.addCleanup(CONF.clear_override, k, self.group)
CONF.set_override(k, v, self.group)
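# Usage sketch (the option names below are illustrative, not taken from this
# module):
#
#     self.useFixture(ConfPatcher(host='fake-host'))
#     self.useFixture(ConfPatcher(group='glance', api_servers=['http://stub']))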
class OSAPIFixture(fixtures.Fixture):
"""Create an OS API server as a fixture.
This spawns an OS API server as a fixture in a new greenthread in
the current test. The fixture has a .api parameter which is a
simple rest client that can communicate with it.
This fixture is extremely useful for testing REST responses
through the WSGI stack easily in functional tests.
Usage:
api = self.useFixture(fixtures.OSAPIFixture()).api
resp = api.api_request('/someurl')
self.assertEqual(200, resp.status_code)
resp = api.api_request('/otherurl', method='POST', body='{foo}')
The resp is a requests library response. Common attributes that
you'll want to use are:
- resp.status_code - integer HTTP status code returned by the request
- resp.content - the body of the response
- resp.headers - dictionary of HTTP headers returned
"""
def __init__(self, api_version='v2',
project_id='6f70656e737461636b20342065766572'):
"""Constructor
:param api_version: the API version that we're interested in
using. Currently this expects 'v2' or 'v2.1' as possible
options.
:param project_id: the project id to use on the API.
"""
super(OSAPIFixture, self).__init__()
self.api_version = api_version
self.project_id = project_id
def setUp(self):
super(OSAPIFixture, self).setUp()
# in order to run these in tests we need to bind only to local
# host, and dynamically allocate ports
conf_overrides = {
'osapi_compute_listen': '127.0.0.1',
'metadata_listen': '127.0.0.1',
'osapi_compute_listen_port': 0,
'metadata_listen_port': 0,
'verbose': True,
'debug': True
}
self.useFixture(ConfPatcher(**conf_overrides))
self.osapi = service.WSGIService("osapi_compute")
self.osapi.start()
self.addCleanup(self.osapi.stop)
self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
'host': self.osapi.host, 'port': self.osapi.port,
'api_version': self.api_version})
self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url,
self.project_id)
self.admin_api = client.TestOpenStackClient(
'admin', 'admin', self.auth_url, self.project_id)
class PoisonFunctions(fixtures.Fixture):
"""Poison functions so they explode if we touch them.
When running under a non full stack test harness there are parts
of the code that you don't want to go anywhere near. These include
things like code that spins up extra threads, which just
introduces races.
"""
def setUp(self):
super(PoisonFunctions, self).setUp()
# The compute libvirt driver starts an event thread which only
# causes trouble in tests. Make sure that if tests don't
# properly patch it the test explodes.
# explicit import because MonkeyPatch doesn't magic import
# correctly if we are patching a method on a class in a
# module.
import jacket.compute.virt.libvirt.host
def evloop(*args, **kwargs):
import sys
warnings.warn("Forgot to disable libvirt event thread")
sys.exit(1)
self.useFixture(fixtures.MonkeyPatch(
'jacket.compute.virt.libvirt.host.Host._init_events',
evloop))
class IndirectionAPIFixture(fixtures.Fixture):
"""Patch and restore the global NovaObject indirection api."""
def __init__(self, indirection_api):
"""Constructor
:param indirection_api: the indirection API to be used for tests.
"""
super(IndirectionAPIFixture, self).__init__()
self.indirection_api = indirection_api
def cleanup(self):
obj_base.NovaObject.indirection_api = self.orig_indirection_api
def setUp(self):
super(IndirectionAPIFixture, self).setUp()
self.orig_indirection_api = obj_base.NovaObject.indirection_api
obj_base.NovaObject.indirection_api = self.indirection_api
self.addCleanup(self.cleanup)
class _FakeGreenThread(object):
def __init__(self, func, *args, **kwargs):
self._result = func(*args, **kwargs)
def cancel(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def kill(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def link(self, func, *args, **kwargs):
func(self, *args, **kwargs)
def unlink(self, func, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def wait(self):
return self._result
class SpawnIsSynchronousFixture(fixtures.Fixture):
"""Patch and restore the spawn_n utility method to be synchronous"""
def setUp(self):
super(SpawnIsSynchronousFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'compute.utils.spawn_n', _FakeGreenThread))
self.useFixture(fixtures.MonkeyPatch(
'compute.utils.spawn', _FakeGreenThread))
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations"""
def __init__(self, banned_resources=None):
super(BannedDBSchemaOperations, self).__init__()
self._banned_resources = banned_resources or []
@staticmethod
def _explode(resource, op):
raise exception.DBNotAllowed(
'Operation %s.%s() is not allowed in a database migration' % (
resource, op))
def setUp(self):
super(BannedDBSchemaOperations, self).setUp()
for thing in self._banned_resources:
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.drop' % thing,
lambda *a, **k: self._explode(thing, 'drop')))
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.alter' % thing,
lambda *a, **k: self._explode(thing, 'alter')))
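# Usage sketch (illustrative): forbid dropping or altering tables and columns
# while exercising a schema migration, e.g.:
#
#     with BannedDBSchemaOperations(['Table', 'Column']):
#         migration.db_sync(version)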
class StableObjectJsonFixture(fixtures.Fixture):
"""Fixture that makes sure we get stable JSON object representations.
Since objects contain things like set(), which can't be converted to
JSON, we have some situations where the representation isn't fully
deterministic. This doesn't matter at all at runtime, but does to
unit tests that try to assert things at a low level.
This fixture mocks the obj_to_primitive() call and makes sure to
sort the list of changed fields (which came from a set) before
returning it to the caller.
"""
def __init__(self):
self._original_otp = obj_base.NovaObject.obj_to_primitive
def setUp(self):
super(StableObjectJsonFixture, self).setUp()
def _doit(obj, *args, **kwargs):
result = self._original_otp(obj, *args, **kwargs)
if 'nova_object.changes' in result:
result['nova_object.changes'].sort()
return result
self.useFixture(fixtures.MonkeyPatch(
'compute.objects.base.NovaObject.obj_to_primitive', _doit))
class EngineFacadeFixture(fixtures.Fixture):
"""Fixture to isolation EngineFacade during tests.
Because many elements of EngineFacade are based on globals, once
an engine facade has been initialized, all future code goes
through it. This means that the initialization of sqlite
databases in our Database fixture will drive all connections to
sqlite. While that's fine in a production environment, during
testing this means we can't test against multiple backends in the
same test run.
oslo.db does not yet support a reset mechanism here. This builds a
custom in tree engine facade fixture to handle this. Eventually
this will be added to oslo.db and this can be removed. Tracked by
https://bugs.launchpad.net/oslo.db/+bug/1548960
"""
def __init__(self, ctx_manager, engine, sessionmaker):
super(EngineFacadeFixture, self).__init__()
self._ctx_manager = ctx_manager
self._engine = engine
self._sessionmaker = sessionmaker
def setUp(self):
super(EngineFacadeFixture, self).setUp()
self._existing_factory = self._ctx_manager._root_factory
self._ctx_manager._root_factory = enginefacade._TestTransactionFactory(
self._engine, self._sessionmaker, apply_global=False,
synchronous_reader=True)
self.addCleanup(self.cleanup)
def cleanup(self):
self._ctx_manager._root_factory = self._existing_factory
class ForbidNewLegacyNotificationFixture(fixtures.Fixture):
"""Make sure the test fails if new legacy notification is added"""
def __init__(self):
super(ForbidNewLegacyNotificationFixture, self).__init__()
self.notifier = rpc.LegacyValidatingNotifier
def setUp(self):
super(ForbidNewLegacyNotificationFixture, self).setUp()
self.notifier.fatal = True
# allow the special test value used in
# compute.tests.unit.test_notifications.NotificationsTestCase
self.notifier.allowed_legacy_notification_event_types.append(
'_decorated_function')
self.addCleanup(self.cleanup)
def cleanup(self):
self.notifier.fatal = False
self.notifier.allowed_legacy_notification_event_types.remove(
'_decorated_function')
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import itertools
import os
import pprint
import shutil
from abc import abstractmethod
from collections import OrderedDict, defaultdict
from pex.compatibility import string, to_bytes
from pex.installer import InstallerBase, Packager
from pex.interpreter import PythonInterpreter
from twitter.common.collections import OrderedSet
from twitter.common.dirutil.chroot import Chroot
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.backend.python.tasks2.gather_sources import GatherSources
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.base.specs import SiblingAddresses
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.resources import Resources
from pants.task.task import Task
from pants.util.dirutil import safe_rmtree, safe_walk
from pants.util.meta import AbstractClass
SETUP_BOILERPLATE = """
# DO NOT EDIT THIS FILE -- AUTOGENERATED BY PANTS
# Target: {setup_target}
from setuptools import setup
setup(**
{setup_dict}
)
"""
class SetupPyRunner(InstallerBase):
def __init__(self, source_dir, setup_command, **kw):
self.__setup_command = setup_command.split()
super(SetupPyRunner, self).__init__(source_dir, **kw)
def mixins(self):
mixins = super(SetupPyRunner, self).mixins().copy()
for (key, version) in self._interpreter.extras:
if key == 'setuptools':
mixins['setuptools'] = 'setuptools=={}'.format(version)
break
else:
# We know Pants sets up python interpreters with wheel and setuptools via the `PythonSetup`
# subsystem; so this should never happen
raise AssertionError("Expected interpreter {} to have the extra 'setuptools'"
.format(self._interpreter))
return mixins
def _setup_command(self):
return self.__setup_command
class TargetAncestorIterator(object):
"""Supports iteration of target ancestor lineages."""
def __init__(self, build_graph):
self._build_graph = build_graph
def iter_target_siblings_and_ancestors(self, target):
"""Produces an iterator over a target's siblings and ancestor lineage.
:returns: A target iterator yielding the target and its siblings and then its ancestors from
nearest to furthest removed.
"""
def iter_targets_in_spec_path(spec_path):
try:
siblings = SiblingAddresses(spec_path)
for address in self._build_graph.inject_specs_closure([siblings]):
yield self._build_graph.get_target(address)
except AddressLookupError:
# A spec path may not have any addresses registered under it and that's ok.
# For example:
# a:a
# a/b/c:c
#
# Here a/b contains no addresses.
pass
def iter_siblings_and_ancestors(spec_path):
for sibling in iter_targets_in_spec_path(spec_path):
yield sibling
parent_spec_path = os.path.dirname(spec_path)
if parent_spec_path != spec_path:
for parent in iter_siblings_and_ancestors(parent_spec_path):
yield parent
for target in iter_siblings_and_ancestors(target.address.spec_path):
yield target
# TODO(John Sirois): Get jvm and python publishing on the same page.
# Either python should require all nodes in an exported target closure be either exported or
# 3rdparty or else jvm publishing should use an ExportedTargetDependencyCalculator to aggregate
# un-exported non-3rdparty interior nodes as needed. It seems like the latter is preferable since
# it can be used with a BUILD graph validator requiring completely exported subgraphs to enforce the
# former as a matter of local repo policy.
class ExportedTargetDependencyCalculator(AbstractClass):
"""Calculates the dependencies of exported targets.
When a target is exported many of its internal transitive library dependencies may be satisfied by
other internal targets that are also exported and "own" these internal transitive library deps.
In other words, exported targets generally can have reduced dependency sets and an
`ExportedTargetDependencyCalculator` can calculate these reduced dependency sets.
To use an `ExportedTargetDependencyCalculator` a subclass must be created that implements two
predicates and a walk function for the class of targets in question. For example, a
`JvmDependencyCalculator` would need to be able to identify jvm third party dependency targets,
and local exportable jvm library targets. In addition it would need to define a walk function
that knew how to walk a jvm target's dependencies.
"""
class UnExportedError(TaskError):
"""Indicates a target is not exported."""
class NoOwnerError(TaskError):
"""Indicates an exportable target has no owning exported target."""
class AmbiguousOwnerError(TaskError):
"""Indicates an exportable target has more than one owning exported target."""
def __init__(self, build_graph):
self._ancestor_iterator = TargetAncestorIterator(build_graph)
@abstractmethod
def requires_export(self, target):
"""Identifies targets that need to be exported (are internal targets owning source code).
:param target: The target to identify.
:returns: `True` if the given `target` owns files that should be included in exported packages
when the target is a member of an exported target's dependency graph.
"""
@abstractmethod
def is_exported(self, target):
"""Identifies targets of interest that are exported from this project.
:param target: The target to identify.
:returns: `True` if the given `target` represents a top-level target exported from this project.
"""
@abstractmethod
def dependencies(self, target):
"""Returns an iterator over the dependencies of the given target.
:param target: The target to iterate dependencies of.
:returns: An iterator over all of the target's dependencies.
"""
def _walk(self, target, visitor):
"""Walks the dependency graph for the given target.
:param target: The target to start the walk from.
:param visitor: A function that takes a target and returns `True` if its dependencies should
also be visited.
"""
visited = set()
def walk(current):
if current not in visited:
visited.add(current)
keep_going = visitor(current)
if keep_going:
for dependency in self.dependencies(current):
walk(dependency)
walk(target)
def _closure(self, target):
"""Return the target closure as defined by this dependency calculator's definition of a walk."""
closure = set()
def collect(current):
closure.add(current)
return True
self._walk(target, collect)
return closure
def reduced_dependencies(self, exported_target):
"""Calculates the reduced transitive dependencies for an exported target.
The reduced set of dependencies will be just those transitive dependencies "owned" by
the `exported_target`.
A target is considered "owned" if:
1. It's "3rdparty" and "directly reachable" from `exported_target` by at least 1 path.
2. It's not "3rdparty" and not "directly reachable" by any of `exported_target`'s "3rdparty"
dependencies.
Here "3rdparty" refers to targets identified as either `is_third_party` or `is_exported`.
And in this context "directly reachable" means the target can be reached by following a series
of dependency links from the `exported_target`, never crossing another exported target and
staying within the `exported_target` address space. It's the latter restriction that allows for
unambiguous ownership of exportable targets and mirrors the BUILD file convention of targets
only being able to own sources in their filesystem subtree. The single ambiguous case that can
arise is when there is more than one exported target in the same BUILD file family that can
"directly reach" a target in its address space.
:raises: `UnExportedError` if the given `exported_target` is not, in-fact, exported.
:raises: `NoOwnerError` if a transitive dependency is found with no proper owning exported
target.
:raises: `AmbiguousOwnerError` if there is more than one viable exported owner target for a
given transitive dependency.
"""
# The strategy adopted requires 3 passes:
# 1.) Walk the exported target to collect provisional owned exportable targets, but _not_
# 3rdparty since these may be introduced by exported subgraphs we discover in later steps!
# 2.) Determine the owner of each target collected in 1 by walking the ancestor chain to find
# the closest exported target. The ancestor chain is just all targets whose spec path is
a prefix of the descendant. In other words, all targets in the descendant's BUILD file family
# (its siblings), all targets in its parent directory BUILD file family, and so on.
# 3.) Finally walk the exported target once more, replacing each visited dependency with its
# owner.
if not self.is_exported(exported_target):
raise self.UnExportedError('Cannot calculate reduced dependencies for a non-exported '
'target, given: {}'.format(exported_target))
owner_by_owned_python_target = OrderedDict()
def collect_potentially_owned_python_targets(current):
owner_by_owned_python_target[current] = None # We can't know the owner in the 1st pass.
return (current == exported_target) or not self.is_exported(current)
self._walk(exported_target, collect_potentially_owned_python_targets)
for owned in owner_by_owned_python_target:
if self.requires_export(owned) and not self.is_exported(owned):
potential_owners = set()
for potential_owner in self._ancestor_iterator.iter_target_siblings_and_ancestors(owned):
if self.is_exported(potential_owner) and owned in self._closure(potential_owner):
potential_owners.add(potential_owner)
if not potential_owners:
raise self.NoOwnerError('No exported target owner found for {}'.format(owned))
owner = potential_owners.pop()
if potential_owners:
ambiguous_owners = [o for o in potential_owners
if o.address.spec_path == owner.address.spec_path]
if ambiguous_owners:
raise self.AmbiguousOwnerError('Owners for {} are ambiguous. Found {} and '
'{} others: {}'.format(owned,
owner,
len(ambiguous_owners),
ambiguous_owners))
owner_by_owned_python_target[owned] = owner
reduced_dependencies = OrderedSet()
def collect_reduced_dependencies(current):
if current == exported_target:
return True
else:
# The provider will be one of:
# 1. `None`, ie: a 3rdparty requirement we should collect.
# 2. `exported_target`, ie: a local exportable target owned by `exported_target` that we
# should collect
# 3. Or else a local exportable target owned by some other exported target in which case
# we should collect the exported owner.
owner = owner_by_owned_python_target.get(current)
if owner is None or owner == exported_target:
reduced_dependencies.add(current)
else:
reduced_dependencies.add(owner)
return owner == exported_target or not self.requires_export(current)
self._walk(exported_target, collect_reduced_dependencies)
return reduced_dependencies
class SetupPy(Task):
"""Generate setup.py-based Python projects."""
SOURCE_ROOT = b'src'
PYTHON_DISTS_PRODUCT = 'python_dists'
@staticmethod
def is_requirements(target):
return isinstance(target, PythonRequirementLibrary)
@staticmethod
def is_python_target(target):
return isinstance(target, PythonTarget)
@staticmethod
def is_resources_target(target):
return isinstance(target, Resources)
@classmethod
def has_provides(cls, target):
return cls.is_python_target(target) and target.provides
@classmethod
def product_types(cls):
return [cls.PYTHON_DISTS_PRODUCT]
class DependencyCalculator(ExportedTargetDependencyCalculator):
"""Calculates reduced dependencies for exported python targets."""
def requires_export(self, target):
# TODO(John Sirois): Consider switching to the more general target.has_sources() once Benjy's
# change supporting default globs is in (that change will smooth test migration).
return SetupPy.is_python_target(target) or SetupPy.is_resources_target(target)
def is_exported(self, target):
return SetupPy.has_provides(target)
def dependencies(self, target):
for dependency in target.dependencies:
yield dependency
if self.is_exported(target):
for binary in target.provided_binaries.values():
yield binary
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data(GatherSources.PYTHON_SOURCES)
round_manager.require_data(PythonInterpreter)
@classmethod
def register_options(cls, register):
super(SetupPy, cls).register_options(register)
register('--run',
help="The command to run against setup.py. Don't forget to quote any additional "
"parameters. If no run command is specified, pants will by default generate "
"and dump the source distribution.")
register('--recursive', type=bool,
help='Transitively run setup_py on all provided downstream targets.')
@classmethod
def iter_entry_points(cls, target):
"""Yields the name, entry_point pairs of binary targets in this PythonArtifact."""
for name, binary_target in target.provided_binaries.items():
concrete_target = binary_target
if not isinstance(concrete_target, PythonBinary) or concrete_target.entry_point is None:
raise TargetDefinitionException(target,
'Cannot add a binary to a PythonArtifact if it does not contain an entry_point.')
yield name, concrete_target.entry_point
@classmethod
def declares_namespace_package(cls, filename):
"""Given a filename, walk its ast and determine if it is declaring a namespace package.
Intended only for __init__.py files though it will work for any .py.
"""
with open(filename) as fp:
init_py = ast.parse(fp.read(), filename)
calls = [node for node in ast.walk(init_py) if isinstance(node, ast.Call)]
for call in calls:
if len(call.args) != 1:
continue
if isinstance(call.func, ast.Attribute) and call.func.attr != 'declare_namespace':
continue
if isinstance(call.func, ast.Name) and call.func.id != 'declare_namespace':
continue
if isinstance(call.args[0], ast.Name) and call.args[0].id == '__name__':
return True
return False
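# For reference, the patterns detected above are the usual pkg_resources
# namespace declarations found in an __init__.py, e.g.:
#
#     __import__('pkg_resources').declare_namespace(__name__)
#
# or:
#
#     import pkg_resources
#     pkg_resources.declare_namespace(__name__)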
@classmethod
def nearest_subpackage(cls, package, all_packages):
"""Given a package, find its nearest parent in all_packages."""
def shared_prefix(candidate):
zipped = itertools.izip(package.split('.'), candidate.split('.'))
matching = itertools.takewhile(lambda pair: pair[0] == pair[1], zipped)
return [pair[0] for pair in matching]
shared_packages = list(filter(None, map(shared_prefix, all_packages)))
return '.'.join(max(shared_packages, key=len)) if shared_packages else package
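# Worked example (hypothetical inputs):
#   nearest_subpackage('a.b.c', {'a', 'a.b'}) -> 'a.b'
#   nearest_subpackage('x.y', {'a'})          -> 'x.y'  (no shared prefix)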
@classmethod
def find_packages(cls, chroot, log=None):
"""Detect packages, namespace packages and resources from an existing chroot.
:returns: a tuple of:
set(packages)
set(namespace_packages)
map(package => set(files))
"""
base = os.path.join(chroot.path(), cls.SOURCE_ROOT)
packages, namespace_packages = set(), set()
resources = defaultdict(set)
def iter_files():
for root, _, files in safe_walk(base):
module = os.path.relpath(root, base).replace(os.path.sep, '.')
for filename in files:
yield module, filename, os.path.join(root, filename)
# establish packages, namespace packages in first pass
for module, filename, real_filename in iter_files():
if filename != '__init__.py':
continue
packages.add(module)
if cls.declares_namespace_package(real_filename):
namespace_packages.add(module)
# second pass establishes non-source content (resources)
for module, filename, real_filename in iter_files():
if filename.endswith('.py'):
if module not in packages:
# TODO(wickman) Consider changing this to a full-on error as it could indicate bad BUILD
# hygiene.
# raise cls.UndefinedSource('{} is source but does not belong to a package!'
# .format(filename))
if log:
log.warn('{} is source but does not belong to a package.'.format(real_filename))
else:
continue
submodule = cls.nearest_subpackage(module, packages)
if submodule == module:
resources[submodule].add(filename)
else:
assert module.startswith(submodule + '.')
relative_module = module[len(submodule) + 1:]
relative_filename = os.path.join(relative_module.replace('.', os.path.sep), filename)
resources[submodule].add(relative_filename)
return packages, namespace_packages, resources
@classmethod
def install_requires(cls, reduced_dependencies):
install_requires = OrderedSet()
for dep in reduced_dependencies:
if cls.is_requirements(dep):
for req in dep.payload.requirements:
install_requires.add(str(req.requirement))
elif cls.has_provides(dep):
install_requires.add(dep.provides.key)
return install_requires
def __init__(self, *args, **kwargs):
super(SetupPy, self).__init__(*args, **kwargs)
self._root = get_buildroot()
self._run = self.get_options().run
self._recursive = self.get_options().recursive
def write_contents(self, root_target, reduced_dependencies, chroot):
"""Write contents of the target."""
def write_target_source(target, src):
chroot.copy(os.path.join(get_buildroot(), target.target_base, src),
os.path.join(self.SOURCE_ROOT, src))
# check parent __init__.pys to see if they also need to be copied. this is to allow
# us to determine if they belong to regular packages or namespace packages.
while True:
src = os.path.dirname(src)
if not src:
# Do not allow the repository root to leak (i.e. '.' should not be a package in setup.py)
break
if os.path.exists(os.path.join(target.target_base, src, '__init__.py')):
chroot.copy(os.path.join(target.target_base, src, '__init__.py'),
os.path.join(self.SOURCE_ROOT, src, '__init__.py'))
def write_target(target):
for rel_source in target.sources_relative_to_buildroot():
abs_source_path = os.path.join(get_buildroot(), rel_source)
abs_source_root_path = os.path.join(get_buildroot(), target.target_base)
source_root_relative_path = os.path.relpath(abs_source_path, abs_source_root_path)
write_target_source(target, source_root_relative_path)
write_target(root_target)
for dependency in reduced_dependencies:
if self.is_python_target(dependency) and not dependency.provides:
write_target(dependency)
elif self.is_resources_target(dependency):
write_target(dependency)
def _setup_boilerplate(self):
return SETUP_BOILERPLATE
def write_setup(self, root_target, reduced_dependencies, chroot):
"""Write the setup.py of a target.
Must be run after writing the contents to the chroot.
"""
# NB: several explicit str conversions below force non-unicode strings in order to comply
# with setuptools expectations.
setup_keywords = root_target.provides.setup_py_keywords.copy()
package_dir = {b'': self.SOURCE_ROOT}
packages, namespace_packages, resources = self.find_packages(chroot, self.context.log)
if namespace_packages:
setup_keywords['namespace_packages'] = list(sorted(namespace_packages))
if packages:
setup_keywords.update(
package_dir=package_dir,
packages=list(sorted(packages)),
package_data=dict((str(package), list(map(str, rs)))
for (package, rs) in resources.items()))
setup_keywords['install_requires'] = list(self.install_requires(reduced_dependencies))
for binary_name, entry_point in self.iter_entry_points(root_target):
if 'entry_points' not in setup_keywords:
setup_keywords['entry_points'] = {}
if 'console_scripts' not in setup_keywords['entry_points']:
setup_keywords['entry_points']['console_scripts'] = []
setup_keywords['entry_points']['console_scripts'].append(
'{} = {}'.format(binary_name, entry_point))
# From http://stackoverflow.com/a/13105359
def convert(input):
if isinstance(input, dict):
out = dict()
for key, value in input.items():
out[convert(key)] = convert(value)
return out
elif isinstance(input, list):
return [convert(element) for element in input]
elif isinstance(input, string):
return to_bytes(input)
else:
return input
# Distutils does not support unicode strings in setup.py, so we must
# explicitly convert to binary strings as pants uses unicode_literals.
# Ideally we would write the output stream with an encoding, however,
# pprint.pformat embeds u's in the string itself during conversion.
# For that reason we convert each unicode string independently.
#
# hoth:~ travis$ python
# Python 2.6.8 (unknown, Aug 25 2013, 00:04:29)
# [GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin
# Type "help", "copyright", "credits" or "license" for more information.
# >>> import pprint
# >>> data = {u'entry_points': {u'console_scripts': [u'pants = pants.bin.pants_exe:main']}}
# >>> pprint.pformat(data, indent=4)
# "{ u'entry_points': { u'console_scripts': [ u'pants = pants.bin.pants_exe:main']}}"
# >>>
#
# For more information, see http://bugs.python.org/issue13943
chroot.write(self._setup_boilerplate().format(
setup_dict=pprint.pformat(convert(setup_keywords), indent=4),
setup_target=repr(root_target)
), 'setup.py')
# make sure that setup.py is included
chroot.write('include *.py'.encode('utf8'), 'MANIFEST.in')
def create_setup_py(self, target, dist_dir):
chroot = Chroot(dist_dir, name=target.provides.name)
dependency_calculator = self.DependencyCalculator(self.context.build_graph)
reduced_deps = dependency_calculator.reduced_dependencies(target)
self.write_contents(target, reduced_deps, chroot)
self.write_setup(target, reduced_deps, chroot)
target_base = '{}-{}'.format(target.provides.name, target.provides.version)
setup_dir = os.path.join(dist_dir, target_base)
safe_rmtree(setup_dir)
shutil.move(chroot.path(), setup_dir)
return setup_dir, reduced_deps
def execute(self):
# We operate on the target roots, except that we replace codegen targets with their
# corresponding synthetic targets, since those have the generated sources that actually
    # get published. Note that the "provides" attribute is copied from the original target
# to the synthetic target, so that the latter can be used as a direct stand-in for the
# former here.
preliminary_targets = set(t for t in self.context.target_roots if self.has_provides(t))
targets = set(preliminary_targets)
for t in self.context.targets():
# A non-codegen target has derived_from equal to itself, so we check is_original
# to ensure that the synthetic targets take precedence.
# We check that the synthetic target has the same "provides" as the original, because
# there are other synthetic targets in play (e.g., resources targets) to which this
# substitution logic must not apply.
if (t.derived_from in preliminary_targets and not t.is_original and
self.has_provides(t) and t.provides == t.derived_from.provides):
targets.discard(t.derived_from)
targets.add(t)
if not targets:
raise TaskError('setup-py target(s) must provide an artifact.')
dist_dir = self.get_options().pants_distdir
# NB: We have to create and then run in 2 steps so that we can discover all exported targets
# in-play in the creation phase which then allows a tsort of these exported targets in the run
# phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
# exported target that depends on it is uploaded.
created = {}
def create(target):
if target not in created:
self.context.log.info('Creating setup.py project for {}'.format(target))
setup_dir, dependencies = self.create_setup_py(target, dist_dir)
created[target] = setup_dir
if self._recursive:
for dep in dependencies:
if self.has_provides(dep):
create(dep)
for target in targets:
create(target)
interpreter = self.context.products.get_data(PythonInterpreter)
python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
for target in reversed(sort_targets(created.keys())):
setup_dir = created.get(target)
if setup_dir:
if not self._run:
self.context.log.info('Running packager against {}'.format(setup_dir))
setup_runner = Packager(setup_dir, interpreter=interpreter)
tgz_name = os.path.basename(setup_runner.sdist())
sdist_path = os.path.join(dist_dir, tgz_name)
self.context.log.info('Writing {}'.format(sdist_path))
shutil.move(setup_runner.sdist(), sdist_path)
safe_rmtree(setup_dir)
python_dists[target] = sdist_path
else:
self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
setup_runner = SetupPyRunner(setup_dir, self._run, interpreter=interpreter)
setup_runner.run()
python_dists[target] = setup_dir
|
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of acl command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import SetAclExceptionHandler
from gslib.command import SetAclFuncWrapper
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.storage_url import UrlsAreForSingleProvider
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils import acl_helper
from gslib.utils.constants import NO_MAX
from gslib.utils.retry_util import Retry
_SET_SYNOPSIS = """
gsutil acl set [-f] [-r] [-a] file-or-canned_acl_name url...
"""
_GET_SYNOPSIS = """
gsutil acl get url
"""
_CH_SYNOPSIS = """
gsutil acl ch [-f] [-r] <grant>... url...
where each <grant> is one of the following forms:
-u <id|email>:<perm>
-g <id|email|domain|All|AllAuth>:<perm>
-p <viewers|editors|owners>-<project number>:<perm>
-d <id|email|domain|All|AllAuth|<viewers|editors|owners>-<project number>>:<perm>
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "acl get" command gets the ACL text for a bucket or object, which you can
save and edit for the acl set command.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "acl set" command allows you to set an Access Control List on one or
more buckets and objects. The simplest way to use it is to specify one of
the canned ACLs, e.g.,:
gsutil acl set private gs://bucket
If you want to make an object or bucket publicly readable or writable, it is
recommended to use "acl ch", to avoid accidentally removing OWNER permissions.
See the "acl ch" section for details.
See `Predefined ACLs
<https://cloud.google.com/storage/docs/access-control/lists#predefined-acl>`_
for a list of canned ACLs.
If you want to define more fine-grained control over your data, you can
retrieve an ACL using the "acl get" command, save the output to a file, edit
the file, and then use the "acl set" command to set that ACL on the buckets
and/or objects. For example:
gsutil acl get gs://bucket/file.txt > acl.txt
Make changes to acl.txt such as adding an additional grant, then:
    gsutil acl set acl.txt gs://bucket/file.txt
Note that you can set an ACL on multiple buckets or objects at once,
for example:
gsutil acl set acl.txt gs://bucket/*.jpg
If you have a large number of ACLs to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m acl set acl.txt gs://bucket/*.jpg
Note that multi-threading/multi-processing is only done when the named URLs
refer to objects, which happens either if you name specific objects or
if you enumerate objects by using an object wildcard or specifying
the acl -r flag.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "acl set" request recursively, to all objects under
the specified URL.
-a Performs "acl set" request on all object versions.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. If some of the ACLs
couldn't be set, gsutil's exit status will be non-zero even if
this flag is set. This option is implicitly set when running
"gsutil -m acl...".
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "acl ch" (or "acl change") command updates access control lists, similar
in spirit to the Linux chmod command. You can specify multiple access grant
additions and deletions in a single command run; all changes will be made
atomically to each object in turn. For example, if the command requests
deleting one grant and adding a different grant, the ACLs being updated will
never be left in an intermediate state where one grant has been deleted but
the second grant not yet added. Each change specifies a user or group grant
to add or delete, and for grant additions, one of R, W, O (for the
permission to be granted). A more formal description is provided in a later
section; below we provide examples.
<B>CH EXAMPLES</B>
Examples for "ch" sub-command:
Grant anyone on the internet READ access to the object example-object:
gsutil acl ch -u AllUsers:R gs://example-bucket/example-object
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see "gsutil help setmeta".
Grant anyone on the internet WRITE access to the bucket example-bucket
(WARNING: this is not recommended as you will be responsible for the content):
gsutil acl ch -u AllUsers:W gs://example-bucket
Grant the user john.doe@example.com WRITE access to the bucket
example-bucket:
gsutil acl ch -u john.doe@example.com:WRITE gs://example-bucket
Grant the group admins@example.com OWNER access to all jpg files in
the top level of example-bucket:
gsutil acl ch -g admins@example.com:O gs://example-bucket/*.jpg
Grant the owners of project example-project WRITE access to the bucket
example-bucket:
gsutil acl ch -p owners-example-project:W gs://example-bucket
NOTE: You can replace 'owners' with 'viewers' or 'editors' to grant access
to a project's viewers/editors respectively.
Remove access to the bucket example-bucket for the viewers of project number
12345:
gsutil acl ch -d viewers-12345 gs://example-bucket
NOTE: You cannot remove the project owners group from ACLs of gs:// buckets in
the given project. Attempts to do so will appear to succeed, but the service
will add the project owners group into the new set of ACLs before applying it.
Note that removing a project requires you to reference the project by
its number (which you can see with the acl get command) as opposed to its
project ID string.
Grant the user with the specified canonical ID READ access to all objects
in example-bucket that begin with folder/:
gsutil acl ch -r \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE785ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant the service account foo@developer.gserviceaccount.com WRITE access to
the bucket example-bucket:
gsutil acl ch -u foo@developer.gserviceaccount.com:W gs://example-bucket
Grant all users from the `G Suite
<https://www.google.com/work/apps/business/>`_ domain my-domain.org READ
access to the bucket gcs.my-domain.org:
gsutil acl ch -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by john.doe@example.com from the bucket
example-bucket:
gsutil acl ch -d john.doe@example.com gs://example-bucket
If you have a large number of objects to update, enabling multi-threading
with the gsutil -m flag can significantly improve performance. The
following command adds OWNER for admin@example.org using
multi-threading:
gsutil -m acl ch -r -u admin@example.org:O gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant OWNER to admin@mydomain.org, for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m acl ch -r -g my-domain.org:R -g AllAuth:R \\
-u admin@mydomain.org:O gs://my-bucket/ gs://my-other-bucket
<B>CH ROLES</B>
You may specify the following roles with either their shorthand or
their full name:
R: READ
W: WRITE
O: OWNER
For more information on these roles and the access they grant, see the
permissions section of the `Access Control Lists page
<https://cloud.google.com/storage/docs/access-control/lists#permissions>`_.
<B>CH ENTITIES</B>
There are four different entity types: Users, Groups, All Authenticated Users,
and All Users.
Users are added with -u and a plain ID or email address, as in
"-u john-doe@gmail.com:r". Note: Service Accounts are considered to be users.
Groups are like users, but specified with the -g flag, as in
"-g power-users@example.com:fc". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:O". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing roles is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many entities' roles can be specified on the same command line, allowing
bundled changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-d Remove all roles associated with the matching entity.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. With this option the
gsutil exit status will be 0 even if some ACLs couldn't be
changed.
-g Add or modify a group entity's role.
-p Add or modify a project viewers/editors/owners role.
-R, -r Performs acl ch request recursively, to all objects under the
specified URL.
-u Add or modify a user entity's role.
"""
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
_CH_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = ("""
The acl command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION]))
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
def _ApplyExceptionHandler(cls, exception):
cls.logger.error('Encountered a problem: %s', exception)
cls.everything_set_okay = False
def _ApplyAclChangesWrapper(cls, url_or_expansion_result, thread_state=None):
cls.ApplyAclChanges(url_or_expansion_result, thread_state=thread_state)
class AclCommand(Command):
"""Implementation of gsutil acl command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'acl',
command_name_aliases=['getacl', 'setacl', 'chacl'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='afRrg:u:d:p:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [
CommandArgument.MakeFileURLOrCannedACLArgument(),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
'get': [CommandArgument.MakeNCloudURLsArgument(1)],
'ch': [CommandArgument.MakeZeroOrMoreCloudURLsArgument()],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='acl',
help_name_aliases=['getacl', 'setacl', 'chmod', 'chacl'],
help_type='command_help',
help_one_line_summary='Get, set, or change bucket and/or object ACLs',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
'ch': _ch_help_text
},
)
def _CalculateUrlsStartArg(self):
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if (self.args[0].lower() == 'set') or (self.command_alias_used == 'setacl'):
return 1
else:
return 0
def _SetAcl(self):
"""Parses options and sets ACLs on the specified buckets/objects."""
self.continue_on_error = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
try:
self.SetAclCommandHelper(SetAclFuncWrapper, SetAclExceptionHandler)
except AccessDeniedException as unused_e:
self._WarnServiceAccounts()
raise
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _ChAcl(self):
"""Parses options and changes ACLs on the specified buckets/objects."""
self.parse_versions = True
self.changes = []
self.continue_on_error = False
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-f':
self.continue_on_error = True
elif o == '-g':
if 'gserviceaccount.com' in a:
raise CommandException(
'Service accounts are considered users, not groups; please use '
'"gsutil acl ch -u" instead of "gsutil acl ch -g"')
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.GROUP))
elif o == '-p':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.PROJECT))
elif o == '-u':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.USER))
elif o == '-d':
self.changes.append(acl_helper.AclDel(a))
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
if not self.changes:
      raise CommandException('Please specify at least one access change '
                             'with the -g, -u, -p, or -d flags')
if (not UrlsAreForSingleProvider(self.args) or
StorageUrlFromString(self.args[0]).scheme != 'gs'):
raise CommandException(
'The "{0}" command can only be used with gs:// URLs'.format(
self.command_name))
self.everything_set_okay = True
self.ApplyAclFunc(_ApplyAclChangesWrapper,
_ApplyExceptionHandler,
self.args,
object_fields=['acl', 'generation', 'metageneration'])
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _RaiseForAccessDenied(self, url):
self._WarnServiceAccounts()
raise CommandException('Failed to set acl for %s. Please ensure you have '
'OWNER-role access to this resource.' % url)
@Retry(ServiceException, tries=3, timeout_secs=1)
def ApplyAclChanges(self, name_expansion_result, thread_state=None):
"""Applies the changes in self.changes to the provided URL.
Args:
name_expansion_result: NameExpansionResult describing the target object.
thread_state: If present, gsutil Cloud API instance to apply the changes.
"""
if thread_state:
gsutil_api = thread_state
else:
gsutil_api = self.gsutil_api
url = name_expansion_result.expanded_storage_url
if url.IsBucket():
bucket = gsutil_api.GetBucket(url.bucket_name,
provider=url.scheme,
fields=['acl', 'metageneration'])
current_acl = bucket.acl
elif url.IsObject():
gcs_object = encoding.JsonToMessage(apitools_messages.Object,
name_expansion_result.expanded_result)
current_acl = gcs_object.acl
if not current_acl:
self._RaiseForAccessDenied(url)
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
try:
if url.IsBucket():
preconditions = Preconditions(meta_gen_match=bucket.metageneration)
bucket_metadata = apitools_messages.Bucket(acl=current_acl)
gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
preconditions=preconditions,
provider=url.scheme,
fields=['id'])
else: # Object
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
object_metadata = apitools_messages.Object(acl=current_acl)
try:
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=url.generation,
fields=['id'])
except PreconditionException as e:
# Special retry case where we want to do an additional step, the read
# of the read-modify-write cycle, to fetch the correct object
# metadata before reattempting ACL changes.
self._RefetchObjectMetadataAndApplyAclChanges(url, gsutil_api)
self.logger.info('Updated ACL on %s', url)
except BadRequestException as e:
# Don't retry on bad requests, e.g. invalid email address.
raise CommandException('Received bad request from server: %s' % str(e))
except AccessDeniedException:
self._RaiseForAccessDenied(url)
except PreconditionException as e:
# For objects, retry attempts should have already been handled.
if url.IsObject():
raise CommandException(str(e))
# For buckets, raise PreconditionException and continue to next retry.
raise e
@Retry(PreconditionException, tries=3, timeout_secs=1)
def _RefetchObjectMetadataAndApplyAclChanges(self, url, gsutil_api):
"""Reattempts object ACL changes after a PreconditionException."""
gcs_object = gsutil_api.GetObjectMetadata(
url.bucket_name,
url.object_name,
provider=url.scheme,
fields=['acl', 'generation', 'metageneration'])
current_acl = gcs_object.acl
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
object_metadata = apitools_messages.Object(acl=current_acl)
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=gcs_object.generation,
fields=['id'])
def _ApplyAclChangesAndReturnChangeCount(self, storage_url, acl_message):
modification_count = 0
for change in self.changes:
modification_count += change.Execute(storage_url, acl_message, 'acl',
self.logger)
return modification_count
def RunCommand(self):
"""Command entry point for the acl command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
self.def_acl = False
if action_subcommand == 'get':
metrics.LogCommandParams(subcommands=[action_subcommand])
self.GetAndPrintAcl(self.args[0])
elif action_subcommand == 'set':
metrics.LogCommandParams(subcommands=[action_subcommand])
self._SetAcl()
elif action_subcommand in ('ch', 'change'):
metrics.LogCommandParams(subcommands=[action_subcommand])
self._ChAcl()
else:
raise CommandException(
('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help acl".') % (action_subcommand, self.command_name))
return 0
|
|
#!/usr/bin/env python2
import sys
import requests
import argparse
import os.path
from datetime import datetime
import xml.etree.ElementTree as et
from xml.dom import minidom
import random
if ( len( sys.argv ) < 2 ):
print "\nMissing parameters. Run \"%s -h\" for help.\n" %(sys.argv[0]);
exit();
parser = argparse.ArgumentParser( description='SOAP web service Fuzzer' );
parser.add_argument( 'url', help='Web service URL to fuzz' );
parser.add_argument( '--no-cert-validate', action='store_true', help="Disable certificate validation" );
parser.add_argument( '--auto', action='store_true', help="Enable automatic testing" );
header_group = parser.add_mutually_exclusive_group();
header_group.add_argument( '--header', metavar='<Header>', nargs='*', help='Specify required request headers' );
header_group.add_argument( '--fheader', metavar='<Headers file>', help='Specify a file containing the required request headers' );
parser.add_argument( '--ua', metavar='<User-Agent>', help='Specify User-Agent header' );
parser.add_argument( '--ct', metavar='<Content-Type>', help='Specify Content-Type header' );
data_group = parser.add_mutually_exclusive_group();
data_group.add_argument( '--data', metavar='<POST content>', help='Data to be sent inside the request body' );
data_group.add_argument( '--fdata', metavar='<POST content file>', help='Specify a file containing the data to be sent inside the request body' );
args = parser.parse_args()
def end( reason ):
print '[-] ' + reason;
exit();
def check_url_syntax( url ):
if ((url.find('http://',0,7) == -1) and (url.find('https://',0,8) == -1)):
end( 'Address not starting with http or https.\n[-] Check your URL and try again.\n' );
def verify_url( url ):
print('\n[+] Checking if URL is available...');
if (args.no_cert_validate):
req = requests.post(url, data='', verify=False);
else:
req = requests.post(url, data='');
if (req.status_code == requests.codes.ok):
return
else:
print "[+] HTTP Response Status Code %s - %s" %( req.status_code, req.reason );
end("Exiting.\n");
def make_request( url, usr_headers, content='None' ):
if (args.no_cert_validate):
http = requests.post( url, data=content, headers=usr_headers, verify=False );
else:
http = requests.post( url, data=content, headers=usr_headers );
http.close();
return http;
def get_save_filename( url, clock ):
if url.startswith( 'http://' ):
address = url.replace( 'http://', '', 1 );
address = address.replace( '/', '_' );
address = address + '_' + clock; # www.example.com_webservice.php_20141110_1822.15.059238.xml
return address;
elif url.startswith( 'https://' ):
address = url.replace( 'https://', '', 1 );
address = address.replace( '/', '_' );
address = address + '_' + clock; # www.example.com_webservice.php_20141110_1822.15.059238.xml
return address;
else:
end('Malformed URL');
def save_data( request_content, response_content, clock ):
path = 'requests/'; # path to write requests's response
if not os.path.exists(path):
os.makedirs(path);
with open( path + get_save_filename( args.url, clock ) + '_req.xml', 'w+' ) as fp_req: # requests/www.example.com_webservice.php_20141110_1822.15.059238_req.xml
fp_req.write( request_content.encode('utf-8') ); # fixing problems with unicode characters
with open( path + get_save_filename( args.url, clock ) + '_resp.xml', 'w+' ) as fp_resp: # requests/www.example.com_webservice.php_20141110_1822.15.059238_resp.xml
fp_resp.write( response_content.encode( 'utf-8' ) ); # fixing problems with unicode characters
def add_default_headers():
return dict({ 'Content-Type': 'text/xml; charset=ascii', 'User-Agent': 'FuzzML/1.0', 'SOAPAction': '\"\"' });
def add_header( header_dict, field_value_dict ):
header_dict.update( field_value_dict );
return
def add_headers( args ):
print ( '[+] Adding headers...' );
hr = add_default_headers();
if ( args.header is not None ):
add_header( hr, list2dict( args.header ) );
if ( args.fheader is not None ):
add_header( hr, get_headers_from_file( args.fheader ) );
if ( args.ua is not None ):
add_header( hr, list2dict( [ 'User-Agent', args.ua ] ) );
if ( args.ct is not None ):
add_header( hr, list2dict( [ 'Content-Type', args.ct ] ) );
return hr;
def get_headers_from_file( hdr_file ):
if (os.path.exists( hdr_file )):
fp_hdr = open( hdr_file, 'r');
lines = dict();
for line in fp_hdr:
line = replace_tabs(line);
lines.update( list2dict( line ) );
fp_hdr.close();
return lines;
else:
end( "File not found: %s\n" %( hdr_file ) );
def list2dict( llist ):
converted_list = list();
for i in range( 0, len( llist ) ):
converted_list.extend( llist[i].split(' ') );
return dict( converted_list[i:i+2] for i in range( 0, len( converted_list ), 2) )
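# Illustrative example of list2dict() (not part of the original script): each
# entry is split on single spaces and the resulting tokens are paired up, so
#   list2dict(['Host example.com', 'SOAPAction run'])
# returns {'Host': 'example.com', 'SOAPAction': 'run'}.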
def replace_tabs( string ):
return string.split();
def set_req_body( cmdline_data, file_data ):
if ( ( cmdline_data is not None ) and ( file_data is None ) ): # if the user defined data through command line
return cmdline_data;
elif ( ( cmdline_data is None ) and (file_data is not None ) ): # if the user defined data through a file
if os.path.exists( file_data ):
with open( file_data, 'r' ) as f:
body = f.read( 20 * 1024 ); # read at most 20 KB from file
return body;
elif ( ( cmdline_data is not None ) and ( file_data is not None ) ):
end( 'You cannot specify BOTH parameters: --data AND --fdata. Choose only one.\n' );
def parse_xml_req( xml_data ):
if ( ( not isinstance( xml_data, str ) ) and ( not isinstance( xml_data, et.Element ) ) ):
end( 'Unrecognized xml data.' );
if ( isinstance( xml_data, str ) ):
root = et.fromstring( xml_data );
elif ( isinstance( xml_data, et.Element ) ):
root = xml_data;
return root;
def fuzzml_element_duplication( root, url, hr ):
new_tree = copy_tree ( root );
tree_root = new_tree.getroot();
nodes_to_duplicate = get_nodes_list( tree_root );
if ( nodes_to_duplicate ):
print( '[+] Fuzzing elements (by duplication) and saving responses...' );
for node in nodes_to_duplicate:
children = get_children( node );
for child in children:
child_tree = copy_tree( child );
child_dup = child_tree.getroot();
node.insert( get_children( node ).index( child ), child_dup ); # places the duplicated node side-by-side with the original node
keep_information( et.tostring( tree_root ), url, hr );
node.remove( child_dup );
else:
end( 'Tree has only one Element\n' );
def fuzzml_element_omission( root, url, hr ):
new_tree = copy_tree ( root );
tree_root = new_tree.getroot();
nodes_to_remove = get_nodes_list( tree_root );
if ( nodes_to_remove ):
print( '[+] Fuzzing elements (by omission) and saving responses...' );
for node in nodes_to_remove:
children = get_children( node );
for child in children:
index = get_children( node ).index( child ); # remember element position
node.remove( child );
keep_information( et.tostring( tree_root ), url, hr );
node.insert( index, child ); # places the removed node back into place
else:
end( 'Tree has only one Element\n' );
def fuzzml_element_tag_malformation( root, url, hr ):
new_tree = copy_tree ( root );
tree_root = new_tree.getroot();
nodes_to_remove = get_nodes_list( tree_root );
if ( nodes_to_remove ):
print( '[+] Fuzzing elements (by tag malformation) and saving responses...' );
xml_allowed_chars = sum( list( ( range(65,90),[95],range(97,122) ) ), [] ); # creating list of acceptable chars in an xml tag
for node in nodes_to_remove:
children = get_children( node );
for child in children:
                namespace = None;  # reset so a previous child's namespace prefix cannot leak into this iteration
                if ( '}' in child.tag ):
namespace = child.tag.split('}')[0];
tag = child.tag.split('}')[1];
else:
tag = str( child.tag );
element_position = get_children( node ).index( child ); # remember element position
char_index = random.choice(range(0, len( tag ))); # choosing a random char to strip from tag
if (namespace):
child.tag = namespace + '}' + tag.replace( tag[ char_index ], '' ); # stripping char from tag
else:
child.tag = tag.replace( tag[ char_index ], '' ); # stripping char from tag
child.tag = child.tag + chr(random.choice( xml_allowed_chars ) ) ; # adding a random ascii char suffix
keep_information( et.tostring( tree_root ), url, hr );
if (namespace):
child.tag = namespace + '}' + tag ;
else:
child.tag = tag ; # putting everything back to normal
else:
end( 'Tree has only one Element\n' );
def keep_information( fuzzed_xml, url, hr ):
fuzzed_xml_request = minidom.parseString( fuzzed_xml );
fuzzed_xml_response = make_request( url, hr, fuzzed_xml_request.toprettyxml() );
fuzzed_xml_response = minidom.parseString( fuzzed_xml_response.text.encode( 'utf-8' ) );
save_data( fuzzed_xml_request.toprettyxml(), fuzzed_xml_response.toprettyxml(), datetime.now().strftime( "%Y%m%d_%H%M.%S.%f" ) );
def get_nodes_list( node ):
return list( node.iter() );
def copy_tree( tree_root ):
return et.ElementTree( tree_root );
def get_children( node ):
return list( node );
def main():
print;
check_url_syntax( args.url );
if args.auto:
verify_url( args.url );
hr = add_headers( args );
content = set_req_body( args.data, args.fdata );
print( '[+] Parsing XML...' );
xml_root = parse_xml_req( content );
fuzzml_element_duplication( xml_root, args.url, hr );
fuzzml_element_omission( xml_root, args.url, hr );
fuzzml_element_tag_malformation( xml_root, args.url, hr );
    print( '[+] Fuzzing complete. Saved responses are inside the "requests" folder.\n')
if ( __name__ == '__main__' ):
main();
|
|
"""Support to select an option from a list."""
import logging
import typing
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
CONF_ICON,
CONF_ID,
CONF_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
_LOGGER = logging.getLogger(__name__)
DOMAIN = "input_select"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_INITIAL = "initial"
CONF_OPTIONS = "options"
ATTR_OPTION = "option"
ATTR_OPTIONS = "options"
SERVICE_SELECT_OPTION = "select_option"
SERVICE_SELECT_NEXT = "select_next"
SERVICE_SELECT_PREVIOUS = "select_previous"
SERVICE_SET_OPTIONS = "set_options"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Required(CONF_OPTIONS): vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]),
vol.Optional(CONF_INITIAL): cv.string,
vol.Optional(CONF_ICON): cv.icon,
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_OPTIONS): vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]),
vol.Optional(CONF_INITIAL): cv.string,
vol.Optional(CONF_ICON): cv.icon,
}
def _cv_input_select(cfg):
"""Configure validation helper for input select (voluptuous)."""
options = cfg[CONF_OPTIONS]
initial = cfg.get(CONF_INITIAL)
if initial is not None and initial not in options:
raise vol.Invalid(
'initial state "{}" is not part of the options: {}'.format(
initial, ",".join(options)
)
)
return cfg
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.All(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_OPTIONS): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
vol.Optional(CONF_INITIAL): cv.string,
vol.Optional(CONF_ICON): cv.icon,
},
_cv_input_select,
)
)
},
extra=vol.ALLOW_EXTRA,
)
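# Example YAML accepted by CONFIG_SCHEMA (illustrative only; the entity name
# and options below are made up):
#
#   input_select:
#     who_cooks:
#       name: Who cooks today
#       options:
#         - Paulus
#         - Anne Therese
#       initial: Anne Therese
#       icon: mdi:panda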
RELOAD_SERVICE_SCHEMA = vol.Schema({})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up an input select."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.attach_entity_component_collection(
component, yaml_collection, InputSelect.from_yaml
)
storage_collection = InputSelectStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.attach_entity_component_collection(
component, storage_collection, InputSelect
)
await yaml_collection.async_load(
[{CONF_ID: id_, **cfg} for id_, cfg in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection)
async def reload_service_handler(service_call: ServiceCallType) -> None:
"""Reload yaml entities."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
conf = {DOMAIN: {}}
await yaml_collection.async_load(
[{CONF_ID: id_, **cfg} for id_, cfg in conf.get(DOMAIN, {}).items()]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(
SERVICE_SELECT_OPTION,
{vol.Required(ATTR_OPTION): cv.string},
"async_select_option",
)
component.async_register_entity_service(
SERVICE_SELECT_NEXT, {}, lambda entity, call: entity.async_offset_index(1)
)
component.async_register_entity_service(
SERVICE_SELECT_PREVIOUS, {}, lambda entity, call: entity.async_offset_index(-1)
)
component.async_register_entity_service(
SERVICE_SET_OPTIONS,
{
vol.Required(ATTR_OPTIONS): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
)
},
"async_set_options",
)
return True
class InputSelectStorageCollection(collection.StorageCollection):
"""Input storage based collection."""
CREATE_SCHEMA = vol.Schema(vol.All(CREATE_FIELDS, _cv_input_select))
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
"""Validate the config is valid."""
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: typing.Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return _cv_input_select({**data, **update_data})
class InputSelect(RestoreEntity):
"""Representation of a select input."""
def __init__(self, config: typing.Dict):
"""Initialize a select input."""
self._config = config
self.editable = True
self._current_option = config.get(CONF_INITIAL)
@classmethod
def from_yaml(cls, config: typing.Dict) -> "InputSelect":
"""Return entity instance initialized from yaml storage."""
input_select = cls(config)
input_select.entity_id = ENTITY_ID_FORMAT.format(config[CONF_ID])
input_select.editable = False
return input_select
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
if self._current_option is not None:
return
state = await self.async_get_last_state()
if not state or state.state not in self._options:
self._current_option = self._options[0]
else:
self._current_option = state.state
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._config.get(CONF_NAME)
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def _options(self) -> typing.List[str]:
"""Return a list of selection options."""
return self._config[CONF_OPTIONS]
@property
def state(self):
"""Return the state of the component."""
return self._current_option
@property
def state_attributes(self):
"""Return the state attributes."""
return {ATTR_OPTIONS: self._config[ATTR_OPTIONS], ATTR_EDITABLE: self.editable}
@property
def unique_id(self) -> typing.Optional[str]:
"""Return unique id for the entity."""
return self._config[CONF_ID]
async def async_select_option(self, option):
"""Select new option."""
if option not in self._options:
_LOGGER.warning(
"Invalid option: %s (possible options: %s)",
option,
", ".join(self._options),
)
return
self._current_option = option
self.async_write_ha_state()
async def async_offset_index(self, offset):
"""Offset current index."""
current_index = self._options.index(self._current_option)
new_index = (current_index + offset) % len(self._options)
self._current_option = self._options[new_index]
self.async_write_ha_state()
async def async_set_options(self, options):
"""Set options."""
self._current_option = options[0]
self._config[CONF_OPTIONS] = options
self.async_write_ha_state()
async def async_update_config(self, config: typing.Dict) -> None:
"""Handle when the config is updated."""
self._config = config
self.async_write_ha_state()
|
|
#!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Testdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.testdoc
python path/to/robot/testdoc.py
Instead of ``python``, it is also possible to use other Python interpreters.
This module also provides :func:`testdoc` and :func:`testdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
from six import PY3
if PY3:
long = int
USAGE = """robot.testdoc -- Robot Framework test data documentation tool
Version: <VERSION>
Usage: python -m robot.testdoc [options] data_sources output_file
Testdoc generates a high level test documentation based on Robot Framework
test data. Generated documentation includes name, documentation and other
metadata of each test suite and test case, as well as the top-level keywords
and their arguments.
Options
=======
-T --title title Set the title of the generated documentation.
Underscores in the title are converted to spaces.
The default title is the name of the top level suite.
-N --name name Override the name of the top level suite.
-D --doc document Override the documentation of the top level suite.
-M --metadata name:value * Set/override metadata of the top level suite.
-G --settag tag * Set given tag(s) to all test cases.
-t --test name * Include tests by name.
-s --suite name * Include suites by name.
-i --include tag * Include tests by tags.
-e --exclude tag * Exclude tests by tags.
-h -? --help Print this help.
All options except --title have exactly the same semantics as the same options
have when executing test cases.
Execution
=========
Data can be given as a single file, directory, or as multiple files and
directories. In all these cases, the last argument must be the file where
to write the output. The output is always created in HTML format.
Testdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). It can be executed as an installed module like
`python -m robot.testdoc` or as a script like `python path/robot/testdoc.py`.
Examples:
python -m robot.testdoc my_test.html testdoc.html
jython -m robot.testdoc -N smoke_tests -i smoke path/to/my_tests smoke.html
ipy path/to/robot/testdoc.py first_suite.txt second_suite.txt output.html
For more information about Testdoc and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools.
"""
import os.path
import sys
import time
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RobotSettings
from robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, TESTDOC
from robot.parsing import disable_curdir_processing
from robot.running import TestSuiteBuilder
from robot.utils import (abspath, Application, format_time, get_link_path,
html_escape, html_format, is_string,
secs_to_timestr, seq2str2, timestr_to_secs, unescape)
class TestDoc(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(2,))
def main(self, datasources, title=None, **options):
outfile = abspath(datasources.pop())
suite = TestSuiteFactory(datasources, **options)
self._write_test_doc(suite, outfile, title)
self.console(outfile)
def _write_test_doc(self, suite, outfile, title):
with open(outfile, 'w') as output:
model_writer = TestdocModelWriter(output, suite, title)
HtmlFileWriter(output, model_writer).write(TESTDOC)
@disable_curdir_processing
def TestSuiteFactory(datasources, **options):
settings = RobotSettings(options)
if is_string(datasources):
datasources = [datasources]
suite = TestSuiteBuilder().build(*datasources)
suite.configure(**settings.suite_config)
return suite
class TestdocModelWriter(ModelWriter):
def __init__(self, output, suite, title=None):
self._output = output
self._output_path = getattr(output, 'name', None)
self._suite = suite
self._title = title.replace('_', ' ') if title else suite.name
def write(self, line):
self._output.write('<script type="text/javascript">\n')
self.write_data()
self._output.write('</script>\n')
def write_data(self):
generated_time = time.localtime()
model = {
'suite': JsonConverter(self._output_path).convert(self._suite),
'title': self._title,
'generated': format_time(generated_time, gmtsep=' '),
'generatedMillis': long(time.mktime(generated_time) * 1000)
}
JsonWriter(self._output).write_json('testdoc = ', model)
class JsonConverter(object):
def __init__(self, output_path=None):
self._output_path = output_path
def convert(self, suite):
return self._convert_suite(suite)
def _convert_suite(self, suite):
return {
'source': suite.source or '',
'relativeSource': self._get_relative_source(suite.source),
'id': suite.id,
'name': self._escape(suite.name),
'fullName': self._escape(suite.longname),
'doc': self._html(suite.doc),
'metadata': [(self._escape(name), self._html(value))
for name, value in suite.metadata.items()],
            'numberOfTests': suite.test_count,
'suites': self._convert_suites(suite),
'tests': self._convert_tests(suite),
'keywords': list(self._convert_keywords(suite))
}
def _get_relative_source(self, source):
if not source or not self._output_path:
return ''
return get_link_path(source, os.path.dirname(self._output_path))
def _escape(self, item):
return html_escape(item)
def _html(self, item):
return html_format(unescape(item))
def _convert_suites(self, suite):
return [self._convert_suite(s) for s in suite.suites]
def _convert_tests(self, suite):
return [self._convert_test(t) for t in suite.tests]
def _convert_test(self, test):
return {
'name': self._escape(test.name),
'fullName': self._escape(test.longname),
'id': test.id,
'doc': self._html(test.doc),
'tags': [self._escape(t) for t in test.tags],
'timeout': self._get_timeout(test.timeout),
'keywords': list(self._convert_keywords(test))
}
def _convert_keywords(self, item):
for kw in getattr(item, 'keywords', []):
if kw.type == kw.SETUP_TYPE:
yield self._convert_keyword(kw, 'SETUP')
elif kw.type == kw.TEARDOWN_TYPE:
yield self._convert_keyword(kw, 'TEARDOWN')
elif kw.type == kw.FOR_LOOP_TYPE:
yield self._convert_for_loop(kw)
else:
yield self._convert_keyword(kw, 'KEYWORD')
def _convert_for_loop(self, kw):
return {
'name': self._escape(self._get_for_loop(kw)),
'arguments': '',
'type': 'FOR'
}
def _convert_keyword(self, kw, kw_type):
return {
'name': self._escape(self._get_kw_name(kw)),
'arguments': self._escape(', '.join(kw.args)),
'type': kw_type
}
def _get_kw_name(self, kw):
if kw.assign:
return '%s = %s' % (', '.join(a.rstrip('= ') for a in kw.assign), kw.name)
return kw.name
def _get_for_loop(self, kw):
joiner = ' %s ' % kw.flavor
return ', '.join(kw.variables) + joiner + seq2str2(kw.values)
def _get_timeout(self, timeout):
if timeout is None:
return ''
try:
tout = secs_to_timestr(timestr_to_secs(timeout.value))
except ValueError:
tout = timeout.value
if timeout.message:
tout += ' :: ' + timeout.message
return tout
def testdoc_cli(arguments):
"""Executes `Testdoc` similarly as from the command line.
:param arguments: command line arguments as a list of strings.
For programmatic usage the :func:`testdoc` function is typically better. It
has a better API for that and does not call :func:`sys.exit` like
this function.
Example::
from robot.testdoc import testdoc_cli
testdoc_cli(['--title', 'Test Plan', 'mytests', 'plan.html'])
"""
TestDoc().execute_cli(arguments)
def testdoc(*arguments, **options):
"""Executes `Testdoc` programmatically.
    Arguments and options have the same semantics, and options have the same
    names, as arguments and options to Testdoc.
Example::
from robot.testdoc import testdoc
testdoc('mytests', 'plan.html', title='Test Plan')
"""
TestDoc().execute(*arguments, **options)
if __name__ == '__main__':
testdoc_cli(sys.argv[1:])
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Qubole hook"""
import datetime
import logging
import os
import pathlib
import time
from qds_sdk.commands import (
Command, DbExportCommand, DbImportCommand, DbTapQueryCommand, HadoopCommand, HiveCommand, PigCommand,
PrestoCommand, ShellCommand, SparkCommand, SqlCommand,
)
from qds_sdk.qubole import Qubole
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.state import State
log = logging.getLogger(__name__)
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand,
"sqlcmd": SqlCommand
}
POSITIONAL_ARGS = {
'hadoopcmd': ['sub_command'],
'shellcmd': ['parameters'],
'pigcmd': ['parameters']
}
def flatten_list(list_of_lists):
"""Flatten the list"""
return [element for array in list_of_lists for element in array]
def filter_options(options):
"""Remove options from the list"""
options_to_remove = ["help", "print-logs-live", "print-logs"]
return [option for option in options if option not in options_to_remove]
def get_options_list(command_class):
"""Get options list"""
options_list = [option.get_opt_string().strip("--") for option in command_class.optparser.option_list]
return filter_options(options_list)
def build_command_args():
"""Build Command argument from command and options"""
command_args, hyphen_args = {}, set()
for cmd in COMMAND_CLASSES:
# get all available options from the class
opts_list = get_options_list(COMMAND_CLASSES[cmd])
# append positional args if any for the command
if cmd in POSITIONAL_ARGS:
opts_list += POSITIONAL_ARGS[cmd]
# get args with a hyphen and replace them with underscore
for index, opt in enumerate(opts_list):
if "-" in opt:
opts_list[index] = opt.replace("-", "_")
hyphen_args.add(opts_list[index])
command_args[cmd] = opts_list
return command_args, list(hyphen_args)
COMMAND_ARGS, HYPHEN_ARGS = build_command_args()
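# COMMAND_ARGS maps each command type to the option names its SDK parser
# accepts (plus any positional args), and HYPHEN_ARGS records options whose
# hyphens were rewritten to underscores. For example, if the SDK exposed an
# option named "some-option" for a command, it would appear here as
# "some_option" and be translated back to "--some-option=<value>" when the
# command arguments are built (hypothetical option name, for illustration only).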
class QuboleHook(BaseHook):
"""Hook for Qubole communication"""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
conn = self.get_connection(kwargs['qubole_conn_id'])
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd = None
self.task_instance = None
@staticmethod
def handle_failure_retry(context):
"""Handle retries in case of failures"""
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
if cmd.status == 'done':
                    log.info('Command ID: %s has succeeded, hence marking this '
                             'TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
log.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context):
"""Execute call"""
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
self.task_instance = context['task_instance']
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id)
self.log.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, self.cmd.status
)
while not Command.is_done(self.cmd.status):
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id)
self.log.info("Command Id: %s and Status: %s", self.cmd.id, self.cmd.status)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
self.log.info("Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log())
if self.cmd.status != 'done':
raise AirflowException('Command Id: {0} failed with Status: {1}'.format(
self.cmd.id, self.cmd.status))
def kill(self, ti):
"""
Kill (cancel) a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
if not ti and not self.task_instance:
raise Exception("Unable to cancel Qubole Command, context is unavailable!")
elif not ti:
ti = self.task_instance
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(
conf.get('logging', 'BASE_LOG_FOLDER')
)
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
pathlib.Path(resultpath).mkdir(parents=True, exist_ok=True)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
def get_log(self, ti):
"""
Get Logs of a command from Qubole
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
        if self.cmd is None:
            cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
        else:
            # fall back to the already-tracked command when available
            cmd_id = self.cmd.id
        return Command.get_log_id(cmd_id)
def get_jobs_id(self, ti):
"""
Get jobs associated with a Qubole commands
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: Job information associated with command
"""
        if self.cmd is None:
            cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
        else:
            # fall back to the already-tracked command when available
            cmd_id = self.cmd.id
        return Command.get_jobs_id(cmd_id)
def create_cmd_args(self, context):
"""Creates command arguments"""
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = {self.dag_id, self.task_id, context['run_id']}
positional_args_list = flatten_list(POSITIONAL_ARGS.values())
for key, value in self.kwargs.items():
if key in COMMAND_ARGS[cmd_type]:
if key in HYPHEN_ARGS:
args.append("--{0}={1}".format(key.replace('_', '-'), value))
elif key in positional_args_list:
inplace_args = value
elif key == 'tags':
self._add_tags(tags, value)
elif key == 'notify':
    if value is True:
        args.append("--notify")
else:
    args.append("--{0}={1}".format(key, value))
args.append("--tags={0}".format(','.join(filter(None, tags))))
if inplace_args is not None:
args += inplace_args.split(' ')
return args
@staticmethod
def _add_tags(tags, value):
if isinstance(value, str):
tags.add(value)
elif isinstance(value, (list, tuple)):
tags.update(value)
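
# A minimal, self-contained sketch (separate from the hook above; all names here are
# hypothetical) of the argument-building idea used by create_cmd_args: plain keyword
# options become "--key=value" flags, a truthy 'notify' becomes a bare "--notify"
# flag, and collected tags are joined into a single "--tags=" option.
def _example_build_args(options, tags):
    args = []
    for key, value in options.items():
        if key == 'notify':
            if value is True:
                args.append('--notify')
        else:
            args.append('--{0}={1}'.format(key, value))
    args.append('--tags={0}'.format(','.join(sorted(filter(None, tags)))))
    return args

if __name__ == '__main__':
    # e.g. ['--query=show tables', '--notify', '--tags=example_dag,example_task']
    print(_example_build_args({'query': 'show tables', 'notify': True},
                              {'example_dag', 'example_task'}))
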
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from io import BytesIO
import json
from lxml import etree
import os
import re
import requests
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class IniConfig(object):
"""Object that stores zanata.ini configuration
Read zanata.ini and make its values available.
Attributes:
inifile: The path to the ini file to load values from.
"""
def __init__(self, inifile):
self.inifile = inifile
self._load_config()
def _load_config(self):
"""Load configuration from the zanata.ini file
Parses the ini file and stores its data.
"""
if not os.path.isfile(self.inifile):
raise ValueError('zanata.ini file not found.')
config = configparser.ConfigParser()
try:
config.read(self.inifile)
except configparser.Error:
raise ValueError('zanata.ini could not be parsed, please check '
'format.')
for item in config.items('servers'):
item_type = item[0].split('.')[1]
if item_type in ('username', 'key', 'url'):
setattr(self, item_type, item[1])
class ZanataRestService(object):
def __init__(self, zconfig, accept='application/xml',
content_type='application/xml', verify=True):
self.url = zconfig.url
if "charset" not in content_type:
content_type = "%s;charset=utf8" % content_type
self.headers = {'Accept': accept,
'Content-Type': content_type,
'X-Auth-User': zconfig.username,
'X-Auth-Token': zconfig.key}
self.verify = verify
def _construct_url(self, url_fragment):
return urljoin(self.url, url_fragment)
def query(self, url_fragment, raise_errors=True):
request_url = self._construct_url(url_fragment)
try:
r = requests.get(request_url, verify=self.verify,
headers=self.headers)
except requests.exceptions.ConnectionError:
raise ValueError('Connection error')
if raise_errors and r.status_code != 200:
raise ValueError('Got status code %s for %s' %
(r.status_code, request_url))
if raise_errors and not r.content:
raise ValueError('Did not receive any data from %s' % request_url)
return r
def push(self, url_fragment, data):
request_url = self._construct_url(url_fragment)
try:
return requests.put(request_url, verify=self.verify,
headers=self.headers, data=json.dumps(data))
except requests.exceptions.ConnectionError:
raise ValueError('Connection error')
class ProjectConfig(object):
"""Object that stores zanata.xml per-project configuration.
Write out a zanata.xml file for the project given the supplied values.
Attributes:
zconfig (IniConfig): zanata.ini values
xmlfile (str): path to zanata.xml to read or write
rules (list): list of two-tuples with pattern and rule
"""
def __init__(self, zconfig, xmlfile, rules, verify, **kwargs):
self.rest_service = ZanataRestService(zconfig, verify=verify)
self.xmlfile = xmlfile
self.rules = self._parse_rules(rules)
for key, value in kwargs.items():
setattr(self, key, value)
self._create_config()
def _get_tag_prefix(self, root):
"""XML utility method
Get the namespace of the XML file so we can
use it to search for tags.
"""
return '{%s}' % etree.QName(root).namespace
def _parse_rules(self, rules):
"""Parse a two-ple of pattern, rule.
Returns a list of dictionaries with 'pattern' and 'rule' keys.
"""
return [{'pattern': rule[0], 'rule': rule[1]} for rule in rules]
def _create_config(self):
"""Create zanata.xml
Use the supplied parameters to create zanata.xml by downloading
a base version of the file and adding customizations.
"""
xml = self._fetch_zanata_xml()
self._add_configuration(xml)
self._write_xml(xml)
def _fetch_zanata_xml(self):
"""Get base zanata.xml
Download a basic version of the configuration for the project
using Zanata's REST API.
"""
r = self.rest_service.query(
'/rest/projects/p/%s/iterations/i/%s/config'
% (self.project, self.version))
project_config = r.content
p = etree.XMLParser(remove_blank_text=True)
try:
xml = etree.parse(BytesIO(project_config), p)
except etree.ParseError:
raise ValueError('Error parsing xml output')
return xml
def _add_configuration(self, xml):
"""Insert additional configuration
Add locale mapping rules to the base zanata.xml retrieved from
the server.
Args:
xml (etree): zanata.xml file contents
"""
root = xml.getroot()
s = etree.SubElement(root, 'src-dir')
s.text = self.srcdir
t = etree.SubElement(root, 'trans-dir')
t.text = self.txdir
rules = etree.SubElement(root, 'rules')
for rule in self.rules:
new_rule = etree.SubElement(rules, 'rule')
new_rule.attrib['pattern'] = rule['pattern']
new_rule.text = rule['rule']
if self.excludes:
excludes = etree.SubElement(root, 'excludes')
excludes.text = self.excludes
tag_prefix = self._get_tag_prefix(root)
# Work around https://bugzilla.redhat.com/show_bug.cgi?id=1219624
# by removing port number in URL if it's there
url = root.find('%surl' % tag_prefix)
url.text = re.sub(':443', '', url.text)
def _write_xml(self, xml):
"""Write xml
Write out xml to zanata.xml.
"""
try:
xml.write(self.xmlfile, pretty_print=True)
except IOError:
raise ValueError('Error writing zanata.xml.')
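
# A hedged usage sketch (assumes a readable zanata.ini with a [servers] section and
# network access to the Zanata server it names; the project slug, version and
# directory layout below are hypothetical): load credentials with IniConfig, then let
# ProjectConfig fetch the base zanata.xml over REST and write a customized copy.
if __name__ == '__main__':
    zconfig = IniConfig(os.path.expanduser('~/.config/zanata.ini'))
    ProjectConfig(
        zconfig,
        xmlfile='zanata.xml',
        rules=[('locale/example.pot', 'locale/{locale}/LC_MESSAGES/example.po')],
        verify=True,
        project='example-project',
        version='master',
        srcdir='locale',
        txdir='locale',
        excludes='',
    )
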
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for joint."""
import operator
from absl.testing import absltest
import numpy as np
from dp_topk import joint
from dp_topk.differential_privacy import NeighborType
def assert_array_less_equal(x, y, err_msg='', verbose=True):
return np.testing.assert_array_compare(
operator.__le__,
x,
y,
err_msg=err_msg,
verbose=verbose,
header='x is not less than or equal to y.',
equal_inf=False)
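
# The distribution tests below all compare empirical sample counts against expected
# probabilities within four binomial standard errors. A small sketch of that
# tolerance, with hypothetical numbers (not used by the tests themselves):
def _example_tolerance(prob, num_trials):
    """Return the +/- width, in counts, allowed around prob * num_trials."""
    return num_trials * 4 * np.sqrt(prob * (1 - prob) / num_trials)
# e.g. _example_tolerance(0.5, 10000) is roughly 200 counts out of 10000 trials.
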
class JointTest(absltest.TestCase):
def test_make_diff_matrix_distinct_outputs(self):
item_counts = np.repeat(np.arange(5), 5)[::-1]
k = 5
diff_matrix = joint.make_diff_matrix(item_counts, k)
distinct_count = len(np.unique(diff_matrix))
expected_distinct_count = len(item_counts) * k
self.assertEqual(distinct_count, expected_distinct_count)
def test_make_diff_matrix_increasing_rows(self):
item_counts = np.repeat(np.arange(5), 5)[::-1]
k = 5
diff_matrix = joint.make_diff_matrix(item_counts, k)
rows_increasing = [
all(diff_matrix[row_idx][i] < diff_matrix[row_idx][i + 1]
for i in range(len(item_counts) - 1))
for row_idx in range(k)
]
expected_rows_increasing = [1] * k
np.testing.assert_array_equal(rows_increasing, expected_rows_increasing)
def test_make_diff_matrix_decreasing_columns(self):
item_counts = np.repeat(np.arange(5), 5)[::-1]
k = 5
diff_matrix = joint.make_diff_matrix(item_counts, k)
columns_decreasing = [
all(diff_matrix[i][col_idx] > diff_matrix[i + 1][col_idx]
for i in range(k - 1))
for col_idx in range(len(item_counts))
]
expected_columns_decreasing = [1] * len(item_counts)
np.testing.assert_array_equal(columns_decreasing,
expected_columns_decreasing)
def test_make_diff_matrix(self):
item_counts = np.array([5, 5, 3])
k = 2
diff_matrix = joint.make_diff_matrix(item_counts, k)
expected_diff_matrix = np.array([[1. / 3, 5. / 12, 2.5],
[1. / 12, 1. / 6, 2.25]])
np.testing.assert_array_almost_equal(
diff_matrix, expected_diff_matrix, decimal=6)
def test_get_diffs_to_positions(self):
diff_matrix = np.array([[21, 4, 3, 12, 9], [7, 6, 22, 13, 0],
[17, 10, 5, 15, 2], [8, 16, 20, 14, 24],
[19, 11, 1, 18, 23]])
diffs_to_positions = joint.get_diffs_to_positions(diff_matrix)
expected_diffs_to_positions = np.array([[
1, 4, 2, 0, 0, 2, 1, 1, 3, 0, 2, 4, 0, 1, 3, 2, 3, 2, 4, 4, 3, 0, 1, 4,
3
],
[
4, 2, 4, 2, 1, 2, 1, 0, 0, 4, 1,
1, 3, 3, 3, 3, 1, 0, 3, 0, 2, 0,
2, 4, 4
]])
np.testing.assert_array_equal(diffs_to_positions,
expected_diffs_to_positions)
def test_brute_compute_log_diff_counts(self):
diff_matrix = np.array([[0.3125, 2.375, 2.4375, 6.5],
[-1.9375, 0.125, 0.1875, 4.25]])
with np.errstate(divide='ignore'):
expected_log_diff_counts = np.log([0, 0, 0, 2, 2, 2, 3, 3])
brute_log_diff_counts = joint.brute_compute_log_diff_counts(
diff_matrix, np.sort(np.ndarray.flatten(diff_matrix)))
np.testing.assert_array_equal(brute_log_diff_counts,
expected_log_diff_counts)
def test_compute_log_diff_counts(self):
for d in [5, 6, 7]:
for k in [2, 3, 4]:
for _ in range(100):
uniform_item_counts = np.sort(np.random.choice(10 * d, size=d))[::-1]
diff_matrix = joint.make_diff_matrix(uniform_item_counts, k)
diffs_to_positions = joint.get_diffs_to_positions(diff_matrix)
sorted_diffs = np.sort(diff_matrix, axis=None)
log_diff_counts = joint.compute_log_diff_counts(
diff_matrix, diffs_to_positions)
expected_log_diff_counts = joint.brute_compute_log_diff_counts(
diff_matrix, sorted_diffs)
np.testing.assert_array_almost_equal(
log_diff_counts, expected_log_diff_counts, decimal=6)
def test_racing_sample_distribution(self):
log_terms = np.array([0, 0, 1, 1.5, 2, 3])
sampled_counts = np.zeros(len(log_terms))
num_trials = 10000
for _ in range(num_trials):
sampled_counts[joint.racing_sample(log_terms)] += 1
expected_sample_probs = np.array(
[0.0273, 0.0273, 0.0741, 0.122, 0.201, 0.548])
expected_sample_widths = 4 * np.sqrt(
expected_sample_probs * (1 - expected_sample_probs) / num_trials)
np.testing.assert_array_less(
sampled_counts,
num_trials * (expected_sample_probs + expected_sample_widths))
np.testing.assert_array_less(
num_trials * (expected_sample_probs - expected_sample_widths),
sampled_counts)
def test_sample_diff_idx_distribution_add_remove(self):
sorted_diffs = np.array(
[-1.9375, 0.125, 0.1875, 0.3125, 2.375, 2.4375, 4.25, 6.5])
with np.errstate(divide='ignore'):
log_diff_counts = np.log([0, 0, 1, 3, 4, 2, 15, 19])
sampled_counts = np.zeros(len(log_diff_counts))
eps = 1.
num_trials = 100000
for _ in range(num_trials):
sampled_counts[joint.sample_diff_idx(log_diff_counts, sorted_diffs, eps,
NeighborType.ADD_REMOVE)] += 1
expected_sample_probs = np.array(
[0, 0, 0.109, 0.327, 0.160, 0.0801, 0.221, 0.103])
expected_sample_widths = 4 * np.sqrt(
expected_sample_probs * (1 - expected_sample_probs) / num_trials)
assert_array_less_equal(
sampled_counts,
num_trials * (expected_sample_probs + expected_sample_widths))
assert_array_less_equal(
num_trials * (expected_sample_probs - expected_sample_widths),
sampled_counts)
def test_sample_diff_idx_distribution_swap(self):
sorted_diffs = np.array(
[-1.9375, 0.125, 0.1875, 0.3125, 2.375, 2.4375, 4.25, 6.5])
with np.errstate(divide='ignore'):
log_diff_counts = np.log([0, 0, 1, 3, 4, 2, 15, 19])
sampled_counts = np.zeros(len(log_diff_counts))
eps = 1.
num_trials = 10000
for _ in range(num_trials):
sampled_counts[joint.sample_diff_idx(log_diff_counts, sorted_diffs, eps,
NeighborType.SWAP)] += 1
expected_sample_probs = np.array(
[0, 0, 0.0575, 0.172, 0.139, 0.0697, 0.317, 0.244])
expected_sample_widths = 4 * np.sqrt(
expected_sample_probs * (1 - expected_sample_probs) / num_trials)
assert_array_less_equal(
sampled_counts,
num_trials * (expected_sample_probs + expected_sample_widths))
assert_array_less_equal(
num_trials * (expected_sample_probs - expected_sample_widths),
sampled_counts)
def test_sequence_from_diff_pick_first(self):
diff_matrix = np.array([[
0.3611111111, 5.3888888889, 10.4166666667, 10.4444444444, 13.4722222222,
13.5
],
[
-4.8055555556, 0.2222222222, 5.25, 5.2777777778,
8.3055555556, 8.3333333333
],
[
-9.9722222222, -4.9444444444, 0.0833333333,
0.1111111111, 3.1388888889, 3.1666666667
]])
diff = 5.25
expected_sequence = np.array([0, 2, 1])
sequence = joint.sequence_from_diff(diff, 1, 2, diff_matrix, lambda x: x[0])
np.testing.assert_array_equal(sequence, expected_sequence)
def test_sequence_from_diff_pick_last(self):
diff_matrix = np.array([[
0.3611111111, 5.3888888889, 10.4166666667, 10.4444444444, 13.4722222222,
13.5
],
[
-4.8055555556, 0.2222222222, 5.25, 5.2777777778,
8.3055555556, 8.3333333333
],
[
-9.9722222222, -4.9444444444, 0.0833333333,
0.1111111111, 3.1388888889, 3.1666666667
]])
diff = 5.25
expected_sequence = np.array([0, 2, 5])
sequence = joint.sequence_from_diff(diff, 1, 2, diff_matrix,
lambda x: x[-1])
np.testing.assert_array_equal(sequence, expected_sequence)
def test_sequence_from_diff_distribution(self):
diff_matrix = np.array([[0.3125, 2.375, 2.4375, 6.5],
[-1.9375, 0.125, 0.1875, 4.25]])
diff = 4.25
sequence_counts = np.zeros(4)
num_trials = 10000
for _ in range(num_trials):
sequence = joint.sequence_from_diff(diff, 1, 3, diff_matrix)
if sequence[0] not in [0, 1, 2] or sequence[1] != 3:
sequence_counts[3] += 1
else:
sequence_counts[sequence[0]] += 1
expected_sequence_probs = np.array([1. / 3, 1. / 3, 1. / 3, 0])
expected_sequence_widths = 4 * np.sqrt(
expected_sequence_probs * (1 - expected_sequence_probs) / num_trials)
assert_array_less_equal(
sequence_counts,
num_trials * (expected_sequence_probs + expected_sequence_widths))
assert_array_less_equal(
num_trials * (expected_sequence_probs - expected_sequence_widths),
sequence_counts)
def test_joint_distribution_add_remove(self):
item_counts = np.array([10, 10, 5])
k = 2
eps = 1
neighbor_type = NeighborType.ADD_REMOVE
sensitivity = 1
diff_matrix = np.array([[1. / 3, 5. / 12, 5.5], [1. / 12, 1. / 6, 5.25]])
sequence_counts = np.zeros(7)
num_trials = 10000
for _ in range(num_trials):
sequence = joint.joint(item_counts, k, eps, neighbor_type)
sequence_diff = max(diff_matrix[0, sequence[0]], diff_matrix[1,
sequence[1]])
if sequence_diff == 1. / 12:
sequence_counts[0] += 1
elif sequence_diff == 1. / 6:
sequence_counts[1] += 1
elif sequence_diff == 1. / 3:
sequence_counts[2] += 1
elif sequence_diff == 5. / 12:
sequence_counts[3] += 1
elif sequence_diff == 5.25:
sequence_counts[4] += 1
elif sequence_diff == 5.5:
sequence_counts[5] += 1
else:
sequence_counts[6] += 1
with np.errstate(divide='ignore'):
log_diff_counts = np.log(np.array([0, 0, 1, 1, 2, 2]))
sorted_diffs = np.array([1. / 12, 1. / 6, 1. / 3, 5. / 12, 5.25, 5.5])
unnormalized_probabilities = np.exp(log_diff_counts - (eps * sorted_diffs /
(2 * sensitivity)))
expected_sequence_probs = np.zeros(7)
expected_sequence_probs[:-1] = unnormalized_probabilities / np.sum(
unnormalized_probabilities)
expected_sequence_widths = 4 * np.sqrt(
expected_sequence_probs * (1 - expected_sequence_probs) / num_trials)
assert_array_less_equal(
sequence_counts,
num_trials * (expected_sequence_probs + expected_sequence_widths))
assert_array_less_equal(
num_trials * (expected_sequence_probs - expected_sequence_widths),
sequence_counts)
def test_joint_distribution_swap(self):
item_counts = np.array([10, 10, 5])
k = 2
eps = 1
neighbor_type = NeighborType.SWAP
sensitivity = 2
diff_matrix = np.array([[1. / 3, 5. / 12, 5.5], [1. / 12, 1. / 6, 5.25]])
sequence_counts = np.zeros(7)
num_trials = 10000
for _ in range(num_trials):
sequence = joint.joint(item_counts, k, eps, neighbor_type)
sequence_diff = max(diff_matrix[0, sequence[0]], diff_matrix[1,
sequence[1]])
if sequence_diff == 1. / 12:
sequence_counts[0] += 1
elif sequence_diff == 1. / 6:
sequence_counts[1] += 1
elif sequence_diff == 1. / 3:
sequence_counts[2] += 1
elif sequence_diff == 5. / 12:
sequence_counts[3] += 1
elif sequence_diff == 5.25:
sequence_counts[4] += 1
elif sequence_diff == 5.5:
sequence_counts[5] += 1
else:
sequence_counts[6] += 1
with np.errstate(divide='ignore'):
log_diff_counts = np.log(np.array([0, 0, 1, 1, 2, 2]))
sorted_diffs = np.array([1. / 12, 1. / 6, 1. / 3, 5. / 12, 5.25, 5.5])
unnormalized_probabilities = np.exp(log_diff_counts - (eps * sorted_diffs /
(2 * sensitivity)))
expected_sequence_probs = np.zeros(7)
expected_sequence_probs[:-1] = unnormalized_probabilities / np.sum(
unnormalized_probabilities)
expected_sequence_widths = 4 * np.sqrt(
expected_sequence_probs * (1 - expected_sequence_probs) / num_trials)
assert_array_less_equal(
sequence_counts,
num_trials * (expected_sequence_probs + expected_sequence_widths))
assert_array_less_equal(
num_trials * (expected_sequence_probs - expected_sequence_widths),
sequence_counts)
if __name__ == '__main__':
absltest.main()
'''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from opencmiss.neon.core.neonmodelsources import deserializeNeonModelSource
from opencmiss.zinc.status import OK as ZINC_OK
from opencmiss.neon.core.misc.neonerror import NeonError
class NeonRegion(object):
def __init__(self, name, zincRegion, parent=None):
self._name = name
self._parent = parent
self._children = []
self._modelSources = []
self._zincRegion = zincRegion
# record whether region was created by ancestor model source; see: _reloadModelSources
self._ancestorModelSourceCreated = False
# callback class, only for root region
if not parent:
self._regionChangeCallbacks = []
self._fieldTypeDict = {}
# def __del__(self):
# print("NeonRegion.__del__ " + self.getDisplayName())
def freeContents(self):
"""
Deletes subobjects of region to help free memory held by Zinc objects earlier.
"""
del self._zincRegion
for child in self._children:
child.freeContents()
def _createBlankCopy(self):
zincRegion = self._zincRegion.createRegion()
if self._name:
zincRegion.setName(self._name)
blankRegion = NeonRegion(self._name, zincRegion, self._parent)
return blankRegion
def _assign(self, source):
"""
Replace contents of self with that of source. Fixes up Zinc parent/child region relationships.
"""
if self._parent:
oldZincRegion = self._zincRegion
zincSiblingAfter = oldZincRegion.getNextSibling()
else:
oldZincRegion = None
zincSiblingAfter = None
self.freeContents()
self._name = source._name
# self._parent = source._parent should not be changed
self._children = source._children
for child in self._children:
child._parent = self
self._modelSources = source._modelSources
self._zincRegion = source._zincRegion
# self._ancestorModelSourceCreated is unchanged
if self._parent:
self._parent._zincRegion.removeChild(oldZincRegion)
self._parent._zincRegion.insertChildBefore(self._zincRegion, zincSiblingAfter)
def _informRegionChange(self, treeChange):
"""
Called by regions when their tree structure changes or zinc regions are rebuilt.
Informs registered clients of the change. The root region handles these signals for the whole tree.
"""
rootRegion = self
while rootRegion._parent:
rootRegion = rootRegion._parent
for callback in rootRegion._regionChangeCallbacks:
callback(self, treeChange)
def connectRegionChange(self, callableObject):
"""
Request callbacks on region tree changes.
:param callableObject: Callable object taking a NeonRegion argument and a boolean flag which is True if tree
structure below region needs to be rebuilt.
"""
self._regionChangeCallbacks.append(callableObject)
def _loadModelSourceStreams(self, streamInfo):
self._zincRegion.beginHierarchicalChange()
result = self._zincRegion.read(streamInfo)
fieldmodule = self._zincRegion.getFieldmodule()
fieldmodule.defineAllFaces()
self._zincRegion.endHierarchicalChange()
if result != ZINC_OK:
raise NeonError("Failed to load model sources into region " + self.getPath())
def _loadModelSource(self, modelSource):
streamInfo = self._zincRegion.createStreaminformationRegion()
modelSource.addToZincStreaminformationRegion(streamInfo)
self._loadModelSourceStreams(streamInfo)
newRegionCount = self._discoverNewZincRegions()
self._informRegionChange(newRegionCount > 0)
def _loadModelSources(self):
streamInfo = self._zincRegion.createStreaminformationRegion()
for modelSource in self._modelSources:
modelSource.addToZincStreaminformationRegion(streamInfo)
self._loadModelSourceStreams(streamInfo)
def _reload(self):
"""
Must be called when an already-loaded model source is modified or deleted.
Saves and reloads region tree, starting at ancestor if this region was created by its model source.
"""
if self._ancestorModelSourceCreated:
self._parent._reload()
else:
# beware this breaks parent/child links such as current selection / hierarchical groups
dictSave = self.serialize()
tmpRegion = self._createBlankCopy()
tmpRegion.deserialize(dictSave)
self._assign(tmpRegion)
self._informRegionChange(True)
def _discoverNewZincRegions(self):
"""
Ensure there are Neon regions for every Zinc region in the tree
:return: Number of new descendant regions created
"""
newRegionCount = 0
zincChildRef = self._zincRegion.getFirstChild()
while zincChildRef.isValid():
childName = zincChildRef.getName()
neonChild = self._findChildByName(childName)
if not neonChild:
neonChild = NeonRegion(childName, zincChildRef, self)
neonChild._ancestorModelSourceCreated = True
self._children.append(neonChild)
newRegionCount += (1 + neonChild._discoverNewZincRegions())
zincChildRef = zincChildRef.getNextSibling()
return newRegionCount
def _findChildByName(self, name):
for child in self._children:
if child._name == name:
return child
return None
def _generateChildName(self):
count = len(self._children) + 1
while True:
name = "region" + str(count)
if not self._findChildByName(name):
return name
count += 1
return None
def deserialize(self, dictInput):
if "Model" in dictInput:
model = dictInput["Model"]
if "Sources" in model:
try:
for dictModelSource in model["Sources"]:
modelSource = deserializeNeonModelSource(dictModelSource)
if modelSource:
self._modelSources.append(modelSource)
except NeonError as neonError:
raise NeonError(neonError.getMessage() + " in region " + self.getPath())
self._loadModelSources()
if "Fieldmodule" in dictInput:
# must define fields before scene otherwise referenced fields won't exist
fieldmodule = self._zincRegion.getFieldmodule()
fieldmoduleDescription = json.dumps(dictInput["Fieldmodule"])
result = fieldmodule.readDescription(fieldmoduleDescription)
if result != ZINC_OK:
raise NeonError("Failed to read field module description into region " + self.getPath())
if "Scene" in dictInput:
scene = self._zincRegion.getScene()
sceneDescription = json.dumps(dictInput["Scene"])
result = scene.readDescription(sceneDescription, True)
if result != ZINC_OK:
raise NeonError("Failed to read scene description into region " + self.getPath())
if ("Fieldmodule" in dictInput) and isinstance(dictInput["Fieldmodule"], dict) and \
("Fields" in dictInput["Fieldmodule"]):
# clear IsManaged flags for fields so marked; do last otherwise fields in use by scene may be destroyed
fieldsDict = dictInput["Fieldmodule"]["Fields"]
for fieldDict in fieldsDict:
isManaged = fieldDict["IsManaged"]
if not isManaged:
field = fieldmodule.findFieldByName(fieldDict["Name"])
if field.isValid():
field.setManaged(False)
for currentKey in fieldDict.keys():
if currentKey.find('Field') != -1:
self._fieldTypeDict[fieldDict["Name"]] = currentKey
# following assumes no neon child regions exist, i.e. we are deserializing into a blank region
# for each neon region, ensure there is a matching zinc region in the same order, and recurse
zincChildRef = self._zincRegion.getFirstChild()
if "ChildRegions" in dictInput:
for dictChild in dictInput["ChildRegions"]:
childName = dictChild["Name"]
# see if zinc child with this name created by model source here or in ancestor region
ancestorModelSourceCreated = True
zincChild = self._zincRegion.findChildByName(childName)
if zincChildRef.isValid() and (zincChild == zincChildRef):
zincChildRef = zincChildRef.getNextSibling()
else:
if not zincChild.isValid():
zincChild = self._zincRegion.createRegion()
zincChild.setName(childName)
ancestorModelSourceCreated = False
self._zincRegion.insertChildBefore(zincChild, zincChildRef)
neonChild = NeonRegion(childName, zincChild, self)
neonChild._ancestorModelSourceCreated = ancestorModelSourceCreated
self._children.append(neonChild)
neonChild.deserialize(dictChild)
self._discoverNewZincRegions()
def serialize(self, basePath=None):
dictOutput = {}
if self._name:
dictOutput["Name"] = self._name
dictOutput["Model"] = {}
if self._modelSources:
tmpOutput = []
for modelSource in self._modelSources:
tmpOutput.append(modelSource.serialize(basePath))
dictOutput["Model"]["Sources"] = tmpOutput
if not dictOutput["Model"]:
dictOutput.pop("Model")
if self._zincRegion:
fieldmodule = self._zincRegion.getFieldmodule()
fieldmoduleDescription = fieldmodule.writeDescription()
dictOutput["Fieldmodule"] = json.loads(fieldmoduleDescription)
scene = self._zincRegion.getScene()
sceneDescription = scene.writeDescription()
dictOutput["Scene"] = json.loads(sceneDescription)
if self._children:
tmpOutput = []
for child in self._children:
tmpOutput.append(child.serialize(basePath))
dictOutput["ChildRegions"] = tmpOutput
return dictOutput
def getDisplayName(self):
if self._name:
return self._name
elif not self._parent:
return "/"
return "?"
def getName(self):
return self._name
def getPath(self):
if self._name:
return self._parent.getPath() + self._name + "/"
return "/"
def getParent(self):
return self._parent
def getZincRegion(self):
return self._zincRegion
def getChildCount(self):
return len(self._children)
def getChild(self, index):
return self._children[index]
def getFieldTypeDict(self):
return self._fieldTypeDict
def addFieldTypeToDict(self, field, fieldType):
if field and field.isValid():
self._fieldTypeDict[field.getName()] = fieldType
def replaceFieldTypeKey(self, oldName, newName):
if oldName in self._fieldTypeDict:
self._fieldTypeDict[newName] = self._fieldTypeDict.pop(oldName)
def clear(self):
"""
Clear all contents of region. Can be called for root region
"""
tmpRegion = self._createBlankCopy()
self._assign(tmpRegion)
if self._ancestorModelSourceCreated:
self._reload()
else:
self._informRegionChange(True)
def createChild(self):
"""
Create a child region with a default name
:return: The new Neon Region
"""
childName = self._generateChildName()
zincRegion = self._zincRegion.createChild(childName)
if zincRegion.isValid():
childRegion = NeonRegion(childName, zincRegion, self)
self._children.append(childRegion)
self._informRegionChange(True)
return childRegion
return None
def removeChild(self, childRegion):
"""
Remove child region and destroy
"""
self._children.remove(childRegion)
self._zincRegion.removeChild(childRegion._zincRegion)
childRegion._parent = None
childRegion.freeContents()
if childRegion._ancestorModelSourceCreated:
self._reload()
else:
self._informRegionChange(True)
def remove(self):
"""
Remove self from region tree and destroy; replace with blank region if root
"""
if self._parent:
self._parent.removeChild(self)
else:
self.clear()
def setName(self, name):
if not self._parent:
return False
if len(name) == 0:
return False
if self._ancestorModelSourceCreated:
return False
if ZINC_OK != self._zincRegion.setName(name):
return False
self._name = name
self._informRegionChange(True)
return True
def getModelSources(self):
return self._modelSources
def addModelSource(self, modelSource):
"""
Add model source, applying it if not currently editing
:param modelSource: The model source to add
"""
self._modelSources.append(modelSource)
if not modelSource.isEdit():
self.applyModelSource(modelSource)
def applyModelSource(self, modelSource):
"""
Apply model source, loading it or reloading it with all other sources as required
:param modelSource: The model source to apply
"""
modelSource.setEdit(False)
if modelSource.isLoaded():
self._reload()
else:
self._loadModelSource(modelSource)
def removeModelSource(self, modelSource):
"""
Remove model source, reloading the model if the removed source had been loaded
:param modelSource: The model source to remove
"""
self._modelSources.remove(modelSource)
if modelSource.isLoaded():
self._reload()
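
# A hedged round-trip sketch (assumes the OpenCMISS Zinc Python bindings are
# importable and a Zinc context can be created; the context name is hypothetical):
# serialize a root NeonRegion to a JSON-compatible dict, then rebuild an equivalent
# tree by deserializing into a blank copy, which is essentially what _reload() does.
if __name__ == '__main__':
    from opencmiss.zinc.context import Context
    context = Context('example')
    rootRegion = NeonRegion(None, context.getDefaultRegion())
    state = json.dumps(rootRegion.serialize())      # persist as JSON text
    restored = rootRegion._createBlankCopy()
    restored.deserialize(json.loads(state))
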
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from conary.lib import log
from conary.local import database
from conary.conaryclient import cmdline
from conary.repository import changeset, filecontainer
def listRollbacks(db, cfg):
return formatRollbacks(cfg, db.getRollbackStack().iter(), stream=sys.stdout)
def versionFormat(cfg, version, defaultLabel = None):
"""Format the version according to the options in the cfg object"""
if cfg.fullVersions:
return str(version)
if cfg.showLabels:
ret = "%s/%s" % (version.branch().label(), version.trailingRevision())
return ret
if defaultLabel and (version.branch().label() == defaultLabel):
return str(version.trailingRevision())
ret = "%s/%s" % (version.branch().label(), version.trailingRevision())
return ret
def verStr(cfg, version, flavor, defaultLabel = None):
if defaultLabel is None:
defaultLabel = cfg.installLabel
ret = versionFormat(cfg, version, defaultLabel = defaultLabel)
if cfg.fullFlavors:
return "%s[%s]" % (ret, str(flavor))
return ret
def formatRollbacks(cfg, rollbacks, stream=None):
# Formatter function
if stream is None:
stream = sys.stdout
# Display template
templ = "\t%9s: %s %s\n"
# Shortcut
w_ = stream.write
for (rollbackName, rb) in rollbacks:
w_("%s:\n" % rollbackName)
for cs in rb.iterChangeSets():
newList = []
for pkg in cs.iterNewTroveList():
newList.append((pkg.getName(),
pkg.getOldVersion(), pkg.getOldFlavor(),
pkg.getNewVersion(), pkg.getNewFlavor()))
oldList = [ x[0:3] for x in cs.getOldTroveList() ]
newList.sort()
# look for components of packages and collapse those into the
# package itself (just like update does)
compByPkg = {}
for info in newList:
name = info[0]
if ':' in name:
pkg, component = name.split(':')
pkgInfo = (pkg,) + info[1:]
else:
pkgInfo = info
component = None
l = compByPkg.setdefault(pkgInfo, [])
l.append(component)
oldList.sort()
for info in newList:
(name, oldVersion, oldFlavor, newVersion, newFlavor) = info
if ':' in name:
pkgInfo = (name.split(':')[0],) + info[1:]
if None in compByPkg[pkgInfo]:
# this component was displayed with its package
continue
if info in compByPkg:
comps = [":" + x for x in compByPkg[info] if x is not None]
if comps:
name += '(%s)' % " ".join(comps)
if newVersion.onLocalLabel():
# Don't display changes to local branch
continue
if not oldVersion:
w_(templ % ('erased', name,
verStr(cfg, newVersion, newFlavor)))
else:
ov = oldVersion.trailingRevision()
nv = newVersion.trailingRevision()
if newVersion.onRollbackLabel() and ov == nv:
# Avoid displaying changes to rollback branch
continue
pn = "%s -> %s" % (verStr(cfg, newVersion, newFlavor),
verStr(cfg, oldVersion, oldFlavor,
defaultLabel =
newVersion.branch().label()))
w_(templ % ('updated', name, pn))
compByPkg = {}
for name, version, flavor in oldList:
if ':' in name:
pkg, component = name.split(':')
else:
pkg = name
component = None
l = compByPkg.setdefault((pkg, version, flavor), [])
l.append(component)
for (name, version, flavor) in oldList:
if ':' in name:
pkgInfo = (name.split(':')[0], version, flavor)
if None in compByPkg[pkgInfo]:
# this component was displayed with its package
continue
if (name, version, flavor) in compByPkg:
comps = [ ":" + x
for x in compByPkg[(name, version, flavor)]
if x is not None ]
if comps:
name += '(%s)' % " ".join(comps)
w_(templ % ('installed', name, verStr(cfg, version, flavor)))
w_('\n')
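
# Both formatters group "package:component" troves under their parent package before
# printing, so that e.g. foo and foo:runtime show up as a single "foo(:runtime)" line.
# A standalone, simplified sketch of that grouping step (trove names are hypothetical
# and versions/flavors are ignored here):
def _example_collapse_components(names):
    by_pkg = {}
    for name in names:
        pkg, _, component = name.partition(':')
        by_pkg.setdefault(pkg, []).append(component or None)
    collapsed = []
    for pkg in sorted(by_pkg):
        comps = [':' + c for c in by_pkg[pkg] if c is not None]
        collapsed.append(pkg + ('(%s)' % ' '.join(comps) if comps else ''))
    return collapsed
# _example_collapse_components(['foo', 'foo:runtime', 'bar:doc'])
# -> ['bar(:doc)', 'foo(:runtime)']
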
def formatRollbacksAsUpdate(cfg, rollbackList):
updateTempl = " %-7s %s %s"
templ = " %-7s %s=%s"
print('The following actions will be performed:')
for idx, rb in enumerate(rollbackList):
print('Job %s of %s' % (idx + 1, len(rollbackList)))
newList = []
oldList = []
for cs in rb.iterChangeSets():
for pkg in cs.iterNewTroveList():
newList.append((pkg.getName(),
pkg.getOldVersion(), pkg.getOldFlavor(),
pkg.getNewVersion(), pkg.getNewFlavor()))
oldList += [ x[0:3] for x in cs.getOldTroveList() ]
newList.sort()
# look for components of packages and collapse those into the
# package itself (just like update does)
compByPkg = {}
for info in newList:
name = info[0]
if ':' in name:
pkg, component = name.split(':')
pkgInfo = (pkg,) + info[1:]
else:
pkgInfo = info
component = None
l = compByPkg.setdefault(pkgInfo, [])
l.append(component)
oldList.sort()
for info in newList:
(name, oldVersion, oldFlavor, newVersion, newFlavor) = info
if ':' in name:
pkgInfo = (name.split(':')[0],) + info[1:]
if None in compByPkg[pkgInfo]:
# this component was displayed with its package
continue
if info in compByPkg:
comps = [":" + x for x in compByPkg[info] if x is not None]
if comps:
name += '(%s)' % " ".join(comps)
if newVersion.onLocalLabel():
# Don't display changes to local branch
continue
if not oldVersion:
print(templ % ('Install', name,
verStr(cfg, newVersion, newFlavor)))
else:
ov = oldVersion.trailingRevision()
nv = newVersion.trailingRevision()
if newVersion.onRollbackLabel() and ov == nv:
# Avoid displaying changes to rollback branch
continue
pn = "(%s -> %s)" % (verStr(cfg, oldVersion, newFlavor),
verStr(cfg, newVersion, oldFlavor,
defaultLabel =
newVersion.branch().label()))
print(updateTempl % ('Update', name, pn))
compByPkg = {}
for name, version, flavor in oldList:
if ':' in name:
pkg, component = name.split(':')
else:
pkg = name
component = None
l = compByPkg.setdefault((pkg, version, flavor), [])
l.append(component)
for (name, version, flavor) in oldList:
if ':' in name:
pkgInfo = (name.split(':')[0], version, flavor)
if None in compByPkg[pkgInfo]:
# this component was displayed with its package
continue
if (name, version, flavor) in compByPkg:
comps = [ ":" + x
for x in compByPkg[(name, version, flavor)]
if x is not None ]
if comps:
name += '(%s)' % " ".join(comps)
print(templ % ('Erase', name, verStr(cfg, version, flavor)))
return 0
def applyRollback(client, rollbackSpec, returnOnError = False, **kwargs):
"""
Apply a rollback.
See L{conary.conaryclient.ConaryClient.applyRollback} for a description of
the arguments for this function.
"""
client.checkWriteableRoot()
# Record the transaction counter, to make sure the state of the database
# didn't change while we were computing the rollback list.
transactionCounter = client.db.getTransactionCounter()
log.syslog.command()
showInfoOnly = kwargs.pop('showInfoOnly', False)
defaults = dict(replaceFiles = False,
transactionCounter = transactionCounter,
lazyCache = client.lzCache)
defaults.update(kwargs)
rollbackStack = client.db.getRollbackStack()
rollbackList = rollbackStack.getList()
if rollbackSpec.startswith('r.'):
try:
i = rollbackList.index(rollbackSpec)
except ValueError:
log.error("rollback '%s' not present" % rollbackSpec)
if returnOnError:
return 1
raise database.RollbackDoesNotExist(rollbackSpec)
rollbacks = rollbackList[i:]
rollbacks.reverse()
else:
try:
rollbackCount = int(rollbackSpec)
except ValueError:
log.error("integer rollback count expected instead of '%s'" %
rollbackSpec)
if returnOnError:
return 1
raise database.RollbackDoesNotExist(rollbackSpec)
if rollbackCount < 1:
log.error("rollback count must be positive")
if returnOnError:
return 1
raise database.RollbackDoesNotExist(rollbackSpec)
elif rollbackCount > len(rollbackList):
log.error("rollback count higher then number of rollbacks "
"available")
if returnOnError:
return 1
raise database.RollbackDoesNotExist(rollbackSpec)
rollbacks = rollbackList[-rollbackCount:]
rollbacks.reverse()
capsuleChangeSet = changeset.ReadOnlyChangeSet()
for path in defaults.pop('capsuleChangesets', []):
if os.path.isdir(path):
pathList = [ os.path.join(path, x) for x in os.listdir(path) ]
else:
pathList = [ path ]
for p in pathList:
if not os.path.isfile(p):
continue
try:
cs = changeset.ChangeSetFromFile(p)
except filecontainer.BadContainer:
continue
capsuleChangeSet.merge(cs)
defaults['capsuleChangeSet'] = capsuleChangeSet
#-- Show only information and return
if showInfoOnly or client.cfg.interactive:
rollbackList = [ rollbackStack.getRollback(x) for x in rollbacks if rollbackStack.hasRollback(x) ]
formatRollbacksAsUpdate(client.cfg, rollbackList)
if showInfoOnly:
return 0
#-- Interactive input (default behaviour)
if client.cfg.interactive:
okay = cmdline.askYn('continue with rollback? [y/N]', default=False)
if not okay:
return 1
try:
client.db.applyRollbackList(client.getRepos(), rollbacks, **defaults)
except database.RollbackError, e:
log.error("%s", e)
if returnOnError:
return 1
raise
log.syslog.commandComplete()
return 0
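
# applyRollback above accepts two forms of rollbackSpec: a literal rollback name such
# as 'r.128' (apply everything from that rollback up to the newest one) or a bare
# count such as '3' (apply the newest three); removeRollbacks below parses the spec
# the same way but selects from the oldest end instead. A minimal standalone sketch
# of the applyRollback selection, without the error handling (names hypothetical):
def _example_select_rollbacks(rollbackList, rollbackSpec):
    if rollbackSpec.startswith('r.'):
        selected = rollbackList[rollbackList.index(rollbackSpec):]
    else:
        selected = rollbackList[-int(rollbackSpec):]
    return list(reversed(selected))
# _example_select_rollbacks(['r.0', 'r.1', 'r.2', 'r.3'], '2') -> ['r.3', 'r.2']
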
def removeRollbacks(db, rollbackSpec):
rollbackStack = db.getRollbackStack()
rollbackList = rollbackStack.getList()
if rollbackSpec.startswith('r.'):
try:
i = rollbackList.index(rollbackSpec)
except ValueError:
log.error("rollback '%s' not present" % rollbackSpec)
return 1
rollbacks = rollbackList[:i + 1]
else:
try:
rollbackCount = int(rollbackSpec)
except ValueError:
log.error("integer rollback count expected instead of '%s'" %
rollbackSpec)
return 1
if rollbackCount < 1:
log.error("rollback count must be positive")
return 1
elif rollbackCount > len(rollbackList):
log.error("rollback count higher then number of rollbacks "
"available")
return 1
rollbacks = rollbackList[:rollbackCount]
for rb in rollbacks:
rollbackStack.remove(rb)
return 0
#{ Classes used for the serialization of postrollback scripts.
class RollbackScriptsError(Exception):
"Generic class for rollback scripts exceptions"
class _RollbackScripts(object):
_KEY_JOB = 'job'
_KEY_INDEX = 'index'
_KEY_OLD_COMPAT_CLASS = 'oldCompatibilityClass'
_KEY_NEW_COMPAT_CLASS = 'newCompatibilityClass'
_KEYS = set([_KEY_JOB, _KEY_INDEX, _KEY_OLD_COMPAT_CLASS,
_KEY_NEW_COMPAT_CLASS])
_metaFileNameTemplate = 'post-scripts.meta'
_scriptFileNameTemplate = 'post-script.%d'
def __init__(self):
# Each item is a tuple (job, script, oldCompatClass, newCompatClass)
self._items = []
def add(self, job, script, oldCompatClass, newCompatClass, index=None):
if index is None:
index = len(self._items)
self._items.append((index, job, script, oldCompatClass, newCompatClass))
return self
def __iter__(self):
return iter(self._items)
def getCreatedFiles(self, dir):
"Returns the files that will be created on save"
ret = set()
ret.add(self._getMDFileName(dir))
for idx, job, script, oldCompatClass, newCompatClass in self:
fname = self._getScriptFileName(dir, idx)
ret.add(fname)
return ret
def save(self, dir):
# Save metadata
stream = self._openFile(self._getMDFileName(dir))
self.saveMeta(stream)
stream.close()
for idx, job, script, oldCompatClass, newCompatClass in self:
# Save individual scripts
fname = self._getScriptFileName(dir, idx)
self._openFile(fname).write(script)
def saveMeta(self, stream):
for idx, job, script, oldCompatClass, newCompatClass in self:
if idx > 0:
# Add the double-newline as a group separator
stream.write('\n')
lines = self._serializeMeta(idx, job, oldCompatClass,
newCompatClass)
for line in lines:
stream.write(line)
stream.write('\n')
@classmethod
def load(cls, dir):
ret = cls()
group = []
try:
stream = file(cls._getMDFileName(dir))
except IOError, e:
raise RollbackScriptsError("Open error: %s: %s: %s" %
(e.errno, e.filename, e.strerror))
while 1:
line = stream.readline()
sline = line.strip()
if not sline:
# Empty line (either from a double-newline or from EOF)
if group:
cls._finalize(dir, group, ret)
if line:
# Double-newline
continue
# EOF
break
group.append(sline)
return ret
@classmethod
def _finalize(cls, dir, group, rbs):
idx, g = cls._parseMeta(group)
del group[:]
if g is not None:
try:
scfile = file(cls._getScriptFileName(dir, idx))
except IOError:
# If a script is missing, oh well...
return
rbs.add(g[0], scfile.read(), g[1], g[2], index=idx)
@classmethod
def _serializeVF(cls, version, flavor):
if version is None:
return ''
if flavor is None or not str(flavor):
return str(version)
return "%s[%s]" % (version, flavor)
@classmethod
def _serializeJob(cls, job):
return "%s=%s--%s" % (job[0],
cls._serializeVF(*job[1]),
cls._serializeVF(*job[2]))
@classmethod
def _serializeMeta(cls, idx, job, oldCompatClass, newCompatClass):
lines = []
lines.append('%s: %d' % (cls._KEY_INDEX, idx))
lines.append('%s: %s' % (cls._KEY_JOB, cls._serializeJob(job)))
lines.append('%s: %s' % (cls._KEY_OLD_COMPAT_CLASS, oldCompatClass))
lines.append('%s: %s' % (cls._KEY_NEW_COMPAT_CLASS, newCompatClass))
return lines
@classmethod
def _parseMeta(cls, lines):
ret = {}
for line in lines:
arr = line.split(': ', 1)
if len(arr) != 2:
continue
if arr[0] not in cls._KEYS:
continue
ret[arr[0]] = arr[1]
if cls._KEYS.difference(ret.keys()):
# Missing key
return None, None
job = cmdline.parseChangeList([ret[cls._KEY_JOB]])[0]
oldCompatClass = cls._toInt(ret[cls._KEY_OLD_COMPAT_CLASS])
newCompatClass = cls._toInt(ret[cls._KEY_NEW_COMPAT_CLASS])
try:
idx = int(ret[cls._KEY_INDEX])
except ValueError:
return None, None
return idx, (job, oldCompatClass, newCompatClass)
@classmethod
def _toInt(cls, value):
if value == 'None':
return None
try:
return int(value)
except ValueError:
return None
@classmethod
def _openFile(cls, fileName):
flags = os.O_WRONLY | os.O_CREAT
try:
fd = os.open(fileName, flags, 0600)
except OSError, e:
raise RollbackScriptsError("Open error: %s: %s: %s" %
(e.errno, e.filename, e.strerror))
return os.fdopen(fd, "w")
@classmethod
def _getMDFileName(cls, dir):
return os.path.join(dir, cls._metaFileNameTemplate)
@classmethod
def _getScriptFileName(cls, dir, idx):
return os.path.join(dir, cls._scriptFileNameTemplate % idx)
#}
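
# A hedged sketch of how the serialization above is meant to be used (the output
# directory and the job tuple below are hypothetical; a job here is
# (name, (oldVersion, oldFlavor), (newVersion, newFlavor))): save() writes one
# 'post-scripts.meta' file containing blank-line-separated "key: value" groups plus
# one 'post-script.<index>' file per script body, and load() reverses the process.
if __name__ == '__main__':
    import tempfile
    scripts = _RollbackScripts()
    scripts.add(('foo:runtime', (None, None), ('1.0-1-1', None)),
                '#!/bin/sh\necho post-rollback\n',
                None, 1)
    outdir = tempfile.mkdtemp()
    scripts.save(outdir)
    print(sorted(scripts.getCreatedFiles(outdir)))
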
import logging
from functools import wraps
from io import FileIO
from os import path
from urlparse import parse_qs, urlparse
from iso8601 import parse_date
from munch import munchify
from restkit import BasicAuth, Resource, request
from restkit.errors import ResourceNotFound
from retrying import retry
from simplejson import dumps, loads
from .exceptions import InvalidResponse, NoToken
logger = logging.getLogger(__name__)
IGNORE_PARAMS = ('uri', 'path')
def verify_file(fn):
@wraps(fn)
def wrapper(self, file_, *args, **kwargs):
if isinstance(file_, basestring):
# Using FileIO here instead of open()
# to be able to override the filename
# which is later used when uploading the file.
#
# Explanation:
#
# 1) Restkit reads the filename
# from "name" attribute of a file-like object,
# there is no other way to specify a filename;
#
# 2) The attribute may contain the full path to file,
# which does not work well as a filename;
#
# 3) The attribute is readonly when using open(),
# unlike FileIO object.
file_ = FileIO(file_, 'rb')
file_.name = path.basename(file_.name)
if hasattr(file_, 'read'):
# A file-like object must have 'read' method
return fn(self, file_, *args, **kwargs)
else:
raise TypeError('Expected either a string '
'containing a path to file or a '
'file-like object, got {}'.format(type(file_)))
return wrapper
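
# A minimal sketch of the decorator above on a hypothetical method: a plain path
# string is replaced by a FileIO object whose .name carries only the basename (so
# restkit uploads a clean filename), while an already file-like object is passed
# through untouched.
class _ExampleUploader(object):
    @verify_file
    def upload(self, file_, label):
        return file_.name, label
# _ExampleUploader().upload('/tmp/archive.zip', 'docs') -> ('archive.zip', 'docs'),
# assuming /tmp/archive.zip exists; passing an open file object works the same way.
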
class APIBaseClient(Resource):
"""base class for API"""
def __init__(self, key,
host_url,
api_version,
resource,
params=None,
**kwargs):
super(APIBaseClient, self).__init__(
host_url,
filters=[BasicAuth(key, "")],
**kwargs
)
self.prefix_path = '/api/{}/{}'.format(api_version, resource)
if not isinstance(params, dict):
params = {"mode": "_all_"}
self.params = params
self.headers = {"Content-Type": "application/json"}
# To perform some operations (e.g. create a tender)
# we first need to obtain a cookie. For that reason,
# here we send a HEAD request to a neutral URL.
self.head('/api/{}/spore'.format(api_version))
def request(self, method, path=None, payload=None, headers=None,
params_dict=None, **params):
_headers = dict(self.headers)
_headers.update(headers or {})
try:
response = super(APIBaseClient, self).request(
method, path=path, payload=payload, headers=_headers,
params_dict=params_dict, **params
)
if 'Set-Cookie' in response.headers:
self.headers['Cookie'] = response.headers['Set-Cookie']
return response
except ResourceNotFound as e:
if 'Set-Cookie' in e.response.headers:
self.headers['Cookie'] = e.response.headers['Set-Cookie']
raise e
def patch(self, path=None, payload=None, headers=None,
params_dict=None, **params):
""" HTTP PATCH
- payload: string passed to the body of the request
- path: string additional path to the uri
- headers: dict, optional headers that will
be added to the HTTP request.
- params: optional parameters added to the request
"""
return self.request("PATCH", path=path, payload=payload,
headers=headers, params_dict=params_dict, **params)
def delete(self, path=None, headers=None):
""" HTTP DELETE
- path: string additional path to the uri
- headers: dict, optional headers that will
be added to the HTTP request.
- params: optional parameters added to the request
"""
return self.request("DELETE", path=path, headers=headers)
def _update_params(self, params):
for key in params:
if key not in IGNORE_PARAMS:
self.params[key] = params[key]
def _create_resource_item(self, url, payload, headers={}):
headers.update(self.headers)
response_item = self.post(
url, headers=headers, payload=dumps(payload)
)
if response_item.status_int == 201:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def _get_resource_item(self, url, headers={}):
headers.update(self.headers)
response_item = self.get(url, headers=headers)
if response_item.status_int == 200:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def _patch_resource_item(self, url, payload, headers={}):
headers.update(self.headers)
response_item = self.patch(
url, headers=headers, payload=dumps(payload)
)
if response_item.status_int == 200:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def _upload_resource_file(self, url, data, headers={}, method='post'):
file_headers = {}
file_headers.update(self.headers)
file_headers.update(headers)
file_headers['Content-Type'] = "multipart/form-data"
response_item = getattr(self, method)(
url, headers=file_headers, payload=data
)
if response_item.status_int in (201, 200):
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def _delete_resource_item(self, url, headers={}):
response_item = self.delete(url, headers=headers)
if response_item.status_int == 200:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
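
# A hedged sketch (the resource name below is hypothetical) of how APIBaseClient is
# meant to be extended: the base class handles Basic auth, JSON headers and cookie
# capture, so a concrete client only points it at a resource collection and wraps
# the private helpers.
class _ExamplePlansClient(APIBaseClient):
    """Hypothetical client for a 'plans' collection."""
    def __init__(self, key, host_url="https://api-sandbox.openprocurement.org",
                 api_version='2.0', params=None):
        super(_ExamplePlansClient, self).__init__(
            key, host_url, api_version, 'plans', params)

    def get_plan(self, plan_id):
        return self._get_resource_item('{}/{}'.format(self.prefix_path, plan_id))
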
class TendersClient(APIBaseClient):
"""client for tenders"""
def __init__(self, key,
host_url="https://api-sandbox.openprocurement.org",
api_version='2.0',
params=None,
resource='tenders'):
super(TendersClient, self).__init__(key, host_url, api_version, resource, params)
###########################################################################
# GET ITEMS LIST API METHODS
###########################################################################
@retry(stop_max_attempt_number=5)
def get_tenders(self, params={}, feed='changes'):
params['feed'] = feed
try:
self._update_params(params)
response = self.get(
self.prefix_path,
params_dict=self.params)
if response.status_int == 200:
tender_list = munchify(loads(response.body_string()))
self._update_params(tender_list.next_page)
return tender_list.data
except ResourceNotFound:
del self.params['offset']
raise
raise InvalidResponse
def get_latest_tenders(self, date, tender_id):
iso_dt = parse_date(date)
dt = iso_dt.strftime("%Y-%m-%d")
tm = iso_dt.strftime("%H:%M:%S")
response = self.get(
'{}?offset={}T{}&opt_fields=tender_id&mode=test'.format(
self.prefix_path,
dt,
tm
)
)
if response.status_int == 200:
tender_list = munchify(loads(response.body_string()))
self._update_params(tender_list.next_page)
return tender_list.data
raise InvalidResponse
def _get_tender_resource_list(self, tender, items_name):
return self._get_resource_item(
'{}/{}/{}'.format(self.prefix_path, tender.data.id, items_name),
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def get_questions(self, tender, params={}):
return self._get_tender_resource_list(tender, "questions")
def get_documents(self, tender, params={}):
return self._get_tender_resource_list(tender, "documents")
def get_awards_documents(self, tender, award_id, params={}):
return self._get_resource_item(
'{}/{}/awards/{}/documents'.format(self.prefix_path, tender.data.id, award_id),
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def get_qualification_documents(self, tender, qualification_id, params={}):
return self._get_resource_item(
'{}/{}/qualifications/{}/documents'.format(self.prefix_path, tender.data.id, qualification_id),
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def get_awards(self, tender, params={}):
return self._get_tender_resource_list(tender, "awards")
def get_lots(self, tender, params={}):
return self._get_tender_resource_list(tender, "lots")
###########################################################################
# CREATE ITEM API METHODS
###########################################################################
def _create_tender_resource_item(self, tender, item_obj, items_name):
return self._create_resource_item(
'{}/{}/{}'.format(self.prefix_path, tender.data.id, items_name),
item_obj,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def create_tender(self, tender):
return self._create_resource_item(self.prefix_path, tender)
def create_question(self, tender, question):
return self._create_tender_resource_item(tender, question, "questions")
def create_bid(self, tender, bid):
return self._create_tender_resource_item(tender, bid, "bids")
def create_lot(self, tender, lot):
return self._create_tender_resource_item(tender, lot, "lots")
def create_award(self, tender, award):
return self._create_tender_resource_item(tender, award, "awards")
def create_cancellation(self, tender, cancellation):
return self._create_tender_resource_item(tender, cancellation, "cancellations")
def create_complaint(self, tender, complaint):
return self._create_tender_resource_item(tender, complaint, "complaints")
def create_award_complaint(self, tender, complaint, award_id):
return self._create_resource_item(
'{}/{}/{}'.format(self.prefix_path, tender.data.id, "awards/{0}/complaints".format(award_id)),
complaint,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def create_thin_document(self, tender, document_data):
return self._create_resource_item(
'{}/{}/documents'.format(
self.prefix_path,
tender.data.id
),
document_data,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
###########################################################################
# GET ITEM API METHODS
###########################################################################
def get_tender(self, id):
return self._get_resource_item('{}/{}'.format(self.prefix_path, id))
def _get_tender_resource_item(self, tender, item_id, items_name,
access_token=""):
if access_token:
headers = {'X-Access-Token': access_token}
else:
headers = {'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
return self._get_resource_item(
'{}/{}/{}/{}'.format(self.prefix_path,
tender.data.id,
items_name,
item_id),
headers=headers
)
def get_question(self, tender, question_id):
return self._get_tender_resource_item(tender, question_id, "questions")
def get_bid(self, tender, bid_id, access_token):
return self._get_tender_resource_item(tender, bid_id, "bids",
access_token)
def get_lot(self, tender, lot_id):
return self._get_tender_resource_item(tender, lot_id, "lots")
def get_file(self, tender, url, access_token=None):
parsed_url = urlparse(url)
headers = {}
if access_token:
headers = {'X-Access-Token': access_token}
headers.update(self.headers)
response_item = self.get(parsed_url.path,
headers=headers,
params_dict=parse_qs(parsed_url.query))
if response_item.status_int == 302:
response_obj = request(response_item.headers['location'])
if response_obj.status_int == 200:
return response_obj.body_string(), \
response_obj.headers['Content-Disposition'] \
.split("; filename=")[1].strip('"')
raise InvalidResponse
def extract_credentials(self, id):
return self._get_resource_item('{}/{}/extract_credentials'.format(self.prefix_path, id))
###########################################################################
# PATCH ITEM API METHODS
###########################################################################
def _patch_tender_resource_item(self, tender, item_obj, items_name):
return self._patch_resource_item(
'{}/{}/{}/{}'.format(
self.prefix_path, tender.data.id, items_name, item_obj['data']['id']
),
payload=item_obj,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_tender(self, tender):
return self._patch_resource_item(
'{}/{}'.format(self.prefix_path, tender["data"]["id"]),
payload=tender,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_question(self, tender, question):
return self._patch_tender_resource_item(tender, question, "questions")
def patch_bid(self, tender, bid):
return self._patch_tender_resource_item(tender, bid, "bids")
def patch_bid_document(self, tender, document_data, bid_id, document_id):
return self._patch_resource_item(
'{}/{}/{}/{}/documents/{}'.format(
self.prefix_path, tender.data.id, "bids", bid_id, document_id
),
payload=document_data,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_award(self, tender, award):
return self._patch_tender_resource_item(tender, award, "awards")
def patch_award_document(self, tender, document_data, award_id, document_id):
return self._patch_resource_item(
'{}/{}/{}/{}/documents/{}'.format(
self.prefix_path, tender.data.id, "awards", award_id, document_id
),
payload=document_data,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_cancellation(self, tender, cancellation):
return self._patch_tender_resource_item(tender, cancellation, "cancellations")
def patch_cancellation_document(self, tender, cancellation, cancellation_id, cancellation_doc_id):
return self._patch_resource_item(
'{}/{}/{}/{}/documents/{}'.format(
self.prefix_path, tender.data.id, "cancellations", cancellation_id, cancellation_doc_id
),
payload=cancellation,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_complaint(self, tender, complaint):
return self._patch_tender_resource_item(tender, complaint, "complaints")
def patch_award_complaint(self, tender, complaint, award_id):
return self._patch_resource_item(
'{}/{}/awards/{}/complaints/{}'.format(
self.prefix_path, tender.data.id, award_id, complaint.data.id
),
payload=complaint,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_lot(self, tender, lot):
return self._patch_tender_resource_item(tender, lot, "lots")
def patch_document(self, tender, document):
return self._patch_tender_resource_item(tender, document, "documents")
def patch_qualification(self, tender, qualification):
return self._patch_tender_resource_item(tender, qualification, "qualifications")
def patch_contract(self, tender, contract):
return self._patch_tender_resource_item(tender, contract, "contracts")
def patch_contract_document(self, tender, document_data, contract_id, document_id):
return self._patch_resource_item(
'{}/{}/{}/{}/documents/{}'.format(
self.prefix_path, tender.data.id, "contracts", contract_id, document_id
),
payload=document_data,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_credentials(self, id, access_token):
return self._patch_resource_item('{}/{}/credentials'.format(self.prefix_path, id),
payload={},
headers={'X-Access-Token': access_token})
###########################################################################
# UPLOAD FILE API METHODS
###########################################################################
@verify_file
def upload_document(self, file_, tender):
return self._upload_resource_file(
'{}/{}/documents'.format(
self.prefix_path,
tender.data.id
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def upload_bid_document(self, file_, tender, bid_id, doc_type="documents"):
return self._upload_resource_file(
'{}/{}/bids/{}/{}'.format(
self.prefix_path,
tender.data.id,
bid_id,
doc_type
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def update_bid_document(self, file_, tender, bid_id, document_id, doc_type="documents"):
return self._upload_resource_file(
'{}/{}/bids/{}/{}/{}'.format(
self.prefix_path,
tender.data.id,
bid_id,
doc_type,
document_id
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')},
method='put'
)
@verify_file
def upload_cancellation_document(self, file_, tender, cancellation_id):
return self._upload_resource_file(
'{}/{}/cancellations/{}/documents'.format(
self.prefix_path,
tender.data.id,
cancellation_id
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def update_cancellation_document(self, file_, tender, cancellation_id, document_id):
return self._upload_resource_file(
'{}/{}/cancellations/{}/documents/{}'.format(
self.prefix_path,
tender.data.id,
cancellation_id,
document_id
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')},
method='put'
)
@verify_file
def upload_complaint_document(self, file_, tender, complaint_id):
return self._upload_resource_file(
'{}/{}/complaints/{}/documents'.format(
self.prefix_path,
tender.data.id,
complaint_id),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def upload_award_complaint_document(self, file_, tender, award_id, complaint_id):
return self._upload_resource_file(
'{}/{}/awards/{}/complaints/{}/documents'.format(
self.prefix_path,
tender.data.id,
award_id,
complaint_id),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def upload_qualification_document(self, file_, tender, qualification_id):
return self._upload_resource_file(
'{}/{}/qualifications/{}/documents'.format(
self.prefix_path,
tender.data.id,
qualification_id
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def upload_award_document(self, file_, tender, award_id, doc_type="documents"):
return self._upload_resource_file(
'{}/{}/awards/{}/{}'.format(
self.prefix_path,
tender.data.id,
award_id,
doc_type
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
@verify_file
def upload_contract_document(self, file_, tender, contract_id, doc_type="documents"):
return self._upload_resource_file(
'{}/{}/contracts/{}/{}'.format(
self.prefix_path,
tender.data.id,
contract_id,
doc_type
),
data={"file": file_},
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
###########################################################################
# DELETE ITEMS LIST API METHODS
###########################################################################
def delete_bid(self, tender, bid, access_token=None):
logger.info("delete_lot is deprecated. In next update this function will takes bid_id and access_token instead bid.")
if isinstance(bid, basestring):
bid_id = bid
access_token = access_token
else:
bid_id = bid.data.id
access_token = getattr(getattr(bid, 'access', ''), 'token', '')
return self._delete_resource_item(
'{}/{}/bids/{}'.format(
self.prefix_path,
tender.data.id,
bid_id
),
headers={'X-Access-Token': access_token}
)
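# A minimal sketch of the two call forms delete_bid currently accepts (the
# object form is the deprecated one noted in the log message above; "client",
# "tender", "bid_id" and "access_token" are hypothetical placeholders):
#
#   client.delete_bid(tender, bid)                  # munchified bid carrying bid.access.token
#   client.delete_bid(tender, bid_id, access_token) # plain bid id plus access token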
def delete_lot(self, tender, lot):
logger.info("delete_lot is deprecated. In next update this function will takes lot_id instead lot.")
if isinstance(lot, basestring):
lot_id = lot
else:
lot_id = lot.data.id
return self._delete_resource_item(
'{}/{}/lots/{}'.format(
self.prefix_path,
tender.data.id,
lot_id
),
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
###########################################################################
class Client(TendersClient):
"""client for tenders for backward compatibility"""
class TendersClientSync(TendersClient):
def sync_tenders(self, params=None, extra_headers=None):
params = dict(params or {})
params['feed'] = 'changes'
self.headers.update(extra_headers or {})
response = self.get(self.prefix_path, params_dict=params)
if response.status_int == 200:
tender_list = munchify(loads(response.body_string()))
return tender_list
@retry(stop_max_attempt_number=5)
def get_tender(self, id, extra_headers=None):
self.headers.update(extra_headers or {})
return super(TendersClientSync, self).get_tender(id)
class EDRClient(Resource):
""" Client for validate members by EDR """
def __init__(self, host_url, api_version, username, password, **kwargs):
prefix_path = '{}/api/{}'.format(host_url, api_version)
super(EDRClient, self).__init__(prefix_path,
filters=[BasicAuth(username, password)],
**kwargs)
self.headers = {"Content-Type": "application/json"}
def request(self, method, path=None, payload=None, headers=None,
params_dict=None, **params):
_headers = dict(self.headers)
_headers.update(headers or {})
try:
response = super(EDRClient, self).request(
method, path=path, payload=payload, headers=_headers,
params_dict=params_dict, **params
)
if 'Set-Cookie' in response.headers:
self.headers['Cookie'] = response.headers['Set-Cookie']
return response
except ResourceNotFound as e:
if 'Set-Cookie' in e.response.headers:
self.headers['Cookie'] = e.response.headers['Set-Cookie']
raise e
def verify_member(self, edrpou, headers=None):
response = self.request("GET", "/verify",
params_dict={'id': edrpou},
headers=headers)
if response.status_int == 200:
return munchify(loads(response.body_string()))
raise InvalidResponse
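# A minimal usage sketch for EDRClient (host URL, API version, credentials and
# the EDRPOU value are hypothetical; the constructor and verify_member
# signatures are the ones defined above):
#
#   edr = EDRClient('https://edr.example.com', '2.3', 'user', 'password')
#   subject = edr.verify_member(edrpou_code)  # munchified body on HTTP 200
#
# verify_member raises InvalidResponse for any non-200 status.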
|
|
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a list of all the functions that are not auto-generated.
# It contains all the metadata that describes the functions.
templated_type_symbol_map = {
'bool' : 'b',
'int8_t' : 'a',
'int16_t' : 's',
'int32_t' : 'i',
'int64_t' : 'l',
'float' : 'f',
'double' : 'd',
'string' : 'NS_11StringValueE',
'timestamp' : 'NS_14TimestampValueE'
}
# Generates the BE symbol for the Compute Function class_name::fn_name<templated_type>.
# Does not handle varargs.
# TODO: this is a stopgap. ComputeFunctions are being removed and we can use the
# symbol lookup code in the BE.
def symbol(class_name, fn_name, templated_type=None):
sym = '_ZN6impala'
sym += str(len(class_name)) + class_name
sym += str(len(fn_name)) + fn_name
if templated_type is None:
sym += 'EPNS_4ExprEPNS_8TupleRowE'
else:
sym += 'I'
sym += templated_type_symbol_map[templated_type]
sym += 'EEPvPNS_4ExprEPNS_8TupleRowE'
return sym
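# For illustration, the mangling above expands mechanically to names such as
# these (both derived from the rules in symbol(), not copied from the BE):
#
#   symbol('MathFunctions', 'Pi')
#     -> '_ZN6impala13MathFunctions2PiEPNS_4ExprEPNS_8TupleRowE'
#   symbol('MathFunctions', 'Positive', 'int32_t')
#     -> '_ZN6impala13MathFunctions8PositiveIiEEPvPNS_4ExprEPNS_8TupleRowE'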
# The format is:
# [sql aliases], <return_type>, [<args>], <backend symbol>,
# With an optional
# <prepare symbol>, <close symbol>
#
# 'sql aliases' are the function names that can be used from sql. There must be at least
# one per function.
#
# The symbol can be empty for functions that are not yet implemented or are special-cased
# in Expr::CreateExpr() (i.e., functions that are implemented via a custom Expr class
# rather than a single function).
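# For example, the 'pi' entry below reads: SQL name "pi", DOUBLE return type,
# no arguments, backend symbol 'impala::MathFunctions::Pi'. Entries such as
# 'rand' and 'trunc' append their optional prepare/close symbols as extra
# trailing strings.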
visible_functions = [
[['udf_pi'], 'DOUBLE', [], 'impala::UdfBuiltins::Pi'],
[['udf_abs'], 'DOUBLE', ['DOUBLE'], 'impala::UdfBuiltins::Abs'],
[['udf_lower'], 'STRING', ['STRING'], 'impala::UdfBuiltins::Lower'],
[['max_int'], 'INT', [],
'_ZN6impala11UdfBuiltins6MaxIntEPN10impala_udf15FunctionContextE'],
[['max_tinyint'], 'TINYINT', [],
'_ZN6impala11UdfBuiltins10MaxTinyIntEPN10impala_udf15FunctionContextE'],
[['max_smallint'], 'SMALLINT', [],
'_ZN6impala11UdfBuiltins11MaxSmallIntEPN10impala_udf15FunctionContextE'],
[['max_bigint'], 'BIGINT', [],
'_ZN6impala11UdfBuiltins9MaxBigIntEPN10impala_udf15FunctionContextE'],
[['min_int'], 'INT', [],
'_ZN6impala11UdfBuiltins6MinIntEPN10impala_udf15FunctionContextE'],
[['min_tinyint'], 'TINYINT', [],
'_ZN6impala11UdfBuiltins10MinTinyIntEPN10impala_udf15FunctionContextE'],
[['min_smallint'], 'SMALLINT', [],
'_ZN6impala11UdfBuiltins11MinSmallIntEPN10impala_udf15FunctionContextE'],
[['min_bigint'], 'BIGINT', [],
'_ZN6impala11UdfBuiltins9MinBigIntEPN10impala_udf15FunctionContextE'],
[['is_nan'], 'BOOLEAN', ['DOUBLE'],
'_ZN6impala11UdfBuiltins5IsNanEPN10impala_udf15FunctionContextERKNS1_9DoubleValE'],
[['is_inf'], 'BOOLEAN', ['DOUBLE'],
'_ZN6impala11UdfBuiltins5IsInfEPN10impala_udf15FunctionContextERKNS1_9DoubleValE'],
[['trunc'], 'TIMESTAMP', ['TIMESTAMP', 'STRING'],
'_ZN6impala11UdfBuiltins5TruncEPN10impala_udf15FunctionContextERKNS1_12TimestampValERKNS1_9StringValE',
'_ZN6impala11UdfBuiltins12TruncPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala11UdfBuiltins10TruncCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
# Don't add an entry for EXTRACT(STRING, TIMESTAMP). STRINGs may be used to represent
# TIMESTAMPs meaning EXTRACT(STRING, STRING) is valid. If EXTRACT(STRING, TIMESTAMP)
# is added, it takes precedence over the existing EXTRACT(TIMESTAMP, STRING)
# which could break users.
[['extract'], 'INT', ['TIMESTAMP', 'STRING'],
'_ZN6impala11UdfBuiltins7ExtractEPN10impala_udf15FunctionContextERKNS1_12TimestampValERKNS1_9StringValE',
'_ZN6impala11UdfBuiltins21SwappedExtractPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala11UdfBuiltins12ExtractCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['date_part'], 'INT', ['STRING', 'TIMESTAMP'],
'_ZN6impala11UdfBuiltins7ExtractEPN10impala_udf15FunctionContextERKNS1_9StringValERKNS1_12TimestampValE',
'_ZN6impala11UdfBuiltins14ExtractPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala11UdfBuiltins12ExtractCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['madlib_encode_vector'], 'STRING', ['STRING'],
'_ZN6impala11UdfBuiltins12EncodeVectorEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['madlib_decode_vector'], 'STRING', ['STRING'],
'_ZN6impala11UdfBuiltins12DecodeVectorEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['madlib_print_vector'], 'STRING', ['STRING'],
'_ZN6impala11UdfBuiltins11PrintVectorEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['madlib_vector'], 'STRING', ['DOUBLE', '...'],
'_ZN6impala11UdfBuiltins8ToVectorEPN10impala_udf15FunctionContextEiPKNS1_9DoubleValE'],
[['madlib_vector_get'], 'DOUBLE', ['BIGINT', 'STRING'],
'_ZN6impala11UdfBuiltins9VectorGetEPN10impala_udf15FunctionContextERKNS1_9BigIntValERKNS1_9StringValE'],
# Timestamp functions
[['unix_timestamp'], 'BIGINT', ['STRING'], '_ZN6impala18TimestampFunctions14UnixFromStringEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['year'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions4YearEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['month'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions5MonthEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['dayofweek'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions9DayOfWeekEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['day', 'dayofmonth'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions10DayOfMonthEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['dayofyear'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions9DayOfYearEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['weekofyear'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions10WeekOfYearEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['hour'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions4HourEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['minute'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions6MinuteEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['second'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions6SecondEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['to_date'], 'STRING', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions6ToDateEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['dayname'], 'STRING', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions7DayNameEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['years_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['years_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['years_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['years_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_add', 'add_months'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_add', 'add_months'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['weeks_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['weeks_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['weeks_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['weeks_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_add', 'date_add', 'adddate'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_add', 'date_add', 'adddate'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_sub', 'date_sub', 'subdate'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_sub', 'date_sub', 'subdate'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['milliseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['milliseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['milliseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['milliseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['datediff'], 'INT', ['TIMESTAMP', 'TIMESTAMP'], '_ZN6impala18TimestampFunctions8DateDiffEPN10impala_udf15FunctionContextERKNS1_12TimestampValES6_'],
[['unix_timestamp'], 'BIGINT', [], '_ZN6impala18TimestampFunctions4UnixEPN10impala_udf15FunctionContextE'],
[['unix_timestamp'], 'BIGINT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions4UnixEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['unix_timestamp'], 'BIGINT', ['STRING', 'STRING'], '_ZN6impala18TimestampFunctions4UnixEPN10impala_udf15FunctionContextERKNS1_9StringValES6_',
'_ZN6impala18TimestampFunctions22UnixAndFromUnixPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala18TimestampFunctions20UnixAndFromUnixCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['from_unixtime'], 'STRING', ['INT'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['from_unixtime'], 'STRING', ['INT', 'STRING'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKT_RKS4_',
'_ZN6impala18TimestampFunctions22UnixAndFromUnixPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala18TimestampFunctions20UnixAndFromUnixCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['from_unixtime'], 'STRING', ['BIGINT'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['from_unixtime'], 'STRING', ['BIGINT', 'STRING'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_RKS4_',
'_ZN6impala18TimestampFunctions22UnixAndFromUnixPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala18TimestampFunctions20UnixAndFromUnixCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['now', 'current_timestamp'], 'TIMESTAMP', [], '_ZN6impala18TimestampFunctions3NowEPN10impala_udf15FunctionContextE'],
[['from_utc_timestamp'], 'TIMESTAMP', ['TIMESTAMP', 'STRING'],
"impala::TimestampFunctions::FromUtc"],
[['to_utc_timestamp'], 'TIMESTAMP', ['TIMESTAMP', 'STRING'],
"impala::TimestampFunctions::ToUtc"],
# Math builtin functions
[['pi'], 'DOUBLE', [], 'impala::MathFunctions::Pi'],
[['e'], 'DOUBLE', [], 'impala::MathFunctions::E'],
[['abs'], 'BIGINT', ['BIGINT'], 'impala::MathFunctions::Abs'],
[['abs'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Abs'],
[['abs'], 'FLOAT', ['FLOAT'], 'impala::MathFunctions::Abs'],
[['abs'], 'INT', ['INT'], 'impala::MathFunctions::Abs'],
[['abs'], 'SMALLINT', ['SMALLINT'], 'impala::MathFunctions::Abs'],
[['abs'], 'TINYINT', ['TINYINT'], 'impala::MathFunctions::Abs'],
[['sign'], 'FLOAT', ['DOUBLE'], 'impala::MathFunctions::Sign'],
[['sin'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Sin'],
[['asin'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Asin'],
[['cos'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Cos'],
[['acos'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Acos'],
[['tan'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Tan'],
[['atan'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Atan'],
[['atan2'], 'DOUBLE', ['DOUBLE','DOUBLE'], 'impala::MathFunctions::Atan2'],
[['cosh'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Cosh'],
[['tanh'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Tanh'],
[['sinh'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Sinh'],
[['cot'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Cot'],
[['radians'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Radians'],
[['degrees'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Degrees'],
[['ceil', 'ceiling', 'dceil'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Ceil'],
[['floor'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Floor'],
[['truncate','dtrunc'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Truncate'],
[['round','dround'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Round'],
[['round','dround'], 'DOUBLE', ['DOUBLE', 'INT'], 'impala::MathFunctions::RoundUpTo'],
[['exp', 'dexp'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Exp'],
[['ln','dlog1'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Ln'],
[['log10','dlog10'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Log10'],
[['log2'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Log2'],
[['log'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::Log'],
[['pow', 'power','dpow','fpow'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::Pow'],
[['sqrt','dsqrt'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Sqrt'],
[['rand','random'], 'DOUBLE', [], 'impala::MathFunctions::Rand',
'_ZN6impala13MathFunctions11RandPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['factorial'], 'BIGINT', ['TINYINT'], 'impala::Operators::Factorial_TinyIntVal'],
[['factorial'], 'BIGINT', ['SMALLINT'], 'impala::Operators::Factorial_SmallIntVal'],
[['factorial'], 'BIGINT', ['INT'], 'impala::Operators::Factorial_IntVal'],
[['factorial'], 'BIGINT', ['BIGINT'], 'impala::Operators::Factorial_BigIntVal'],
[['rand'], 'DOUBLE', ['BIGINT'], 'impala::MathFunctions::RandSeed',
'_ZN6impala13MathFunctions11RandPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['bin'], 'STRING', ['BIGINT'], 'impala::MathFunctions::Bin'],
[['hex'], 'STRING', ['BIGINT'], 'impala::MathFunctions::HexInt'],
[['hex'], 'STRING', ['STRING'], 'impala::MathFunctions::HexString'],
[['unhex'], 'STRING', ['STRING'], 'impala::MathFunctions::Unhex'],
[['conv'], 'STRING', ['BIGINT', 'TINYINT', 'TINYINT'],
'impala::MathFunctions::ConvInt'],
[['conv'], 'STRING', ['STRING', 'TINYINT', 'TINYINT'],
'impala::MathFunctions::ConvString'],
[['pmod'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::MathFunctions::PmodBigInt'],
[['pmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::PmodDouble'],
[['fmod'], 'FLOAT', ['FLOAT', 'FLOAT'], 'impala::MathFunctions::FmodFloat'],
[['fmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::FmodDouble'],
[['mod'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Mod_TinyIntVal_TinyIntVal'],
[['mod'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Mod_SmallIntVal_SmallIntVal'],
[['mod'], 'INT', ['INT', 'INT'], 'impala::Operators::Mod_IntVal_IntVal'],
[['mod'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Mod_BigIntVal_BigIntVal'],
[['mod'], 'FLOAT', ['FLOAT', 'FLOAT'], 'impala::MathFunctions::FmodFloat'],
[['mod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::FmodDouble'],
[['mod'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], 'impala::DecimalOperators::Mod_DecimalVal_DecimalVal'],
[['positive'], 'TINYINT', ['TINYINT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'SMALLINT', ['SMALLINT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'INT', ['INT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'BIGINT', ['BIGINT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'FLOAT', ['FLOAT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'DOUBLE', ['DOUBLE'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'DECIMAL', ['DECIMAL'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'TINYINT', ['TINYINT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'SMALLINT', ['SMALLINT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'INT', ['INT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'BIGINT', ['BIGINT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'FLOAT', ['FLOAT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'DOUBLE', ['DOUBLE'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'DECIMAL', ['DECIMAL'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKS4_'],
[['quotient'], 'BIGINT', ['BIGINT', 'BIGINT'],
'impala::MathFunctions::QuotientBigInt'],
[['quotient'], 'BIGINT', ['DOUBLE', 'DOUBLE'],
'impala::MathFunctions::QuotientDouble'],
[['least'], 'TINYINT', ['TINYINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf10TinyIntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'SMALLINT', ['SMALLINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf11SmallIntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'INT', ['INT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf6IntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'BIGINT', ['BIGINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9BigIntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'FLOAT', ['FLOAT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf8FloatValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'DOUBLE', ['DOUBLE', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9DoubleValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'TIMESTAMP', ['TIMESTAMP', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb1EEEN10impala_udf12TimestampValEPNS2_15FunctionContextEiPKS3_'],
[['least'], 'STRING', ['STRING', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb1EEEN10impala_udf9StringValEPNS2_15FunctionContextEiPKS3_'],
[['least'], 'DECIMAL', ['DECIMAL', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb1EEEN10impala_udf10DecimalValEPNS2_15FunctionContextEiPKS3_'],
[['greatest'], 'TINYINT', ['TINYINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf10TinyIntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'SMALLINT', ['SMALLINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf11SmallIntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'INT', ['INT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf6IntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'BIGINT', ['BIGINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9BigIntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'FLOAT', ['FLOAT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf8FloatValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'DOUBLE', ['DOUBLE', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9DoubleValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'TIMESTAMP', ['TIMESTAMP', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb0EEEN10impala_udf12TimestampValEPNS2_15FunctionContextEiPKS3_'],
[['greatest'], 'STRING', ['STRING', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb0EEEN10impala_udf9StringValEPNS2_15FunctionContextEiPKS3_'],
[['greatest'], 'DECIMAL', ['DECIMAL', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb0EEEN10impala_udf10DecimalValEPNS2_15FunctionContextEiPKS3_'],
# Decimal Functions
# TODO: oracle has decimal support for transcendental functions (e.g. sin()) to very
# high precisions. Do we need them? It's unclear if other databases do the same.
[['precision'], 'INT', ['DECIMAL'], 'impala::DecimalFunctions::Precision'],
[['scale'], 'INT', ['DECIMAL'], 'impala::DecimalFunctions::Scale'],
[['abs'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Abs'],
[['ceil', 'ceiling'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Ceil'],
[['floor'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Floor'],
[['round'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Round'],
[['round'], 'DECIMAL', ['DECIMAL', 'TINYINT'], 'impala::DecimalFunctions::RoundTo'],
[['round'], 'DECIMAL', ['DECIMAL', 'SMALLINT'], 'impala::DecimalFunctions::RoundTo'],
[['round'], 'DECIMAL', ['DECIMAL', 'INT'], 'impala::DecimalFunctions::RoundTo'],
[['round'], 'DECIMAL', ['DECIMAL', 'BIGINT'], 'impala::DecimalFunctions::RoundTo'],
[['truncate'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Truncate'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'TINYINT'],
'impala::DecimalFunctions::TruncateTo'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'SMALLINT'],
'impala::DecimalFunctions::TruncateTo'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'INT'],
'impala::DecimalFunctions::TruncateTo'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'BIGINT'],
'impala::DecimalFunctions::TruncateTo'],
# String builtin functions
[['substr', 'substring'], 'STRING', ['STRING', 'BIGINT'],
'impala::StringFunctions::Substring'],
[['substr', 'substring'], 'STRING', ['STRING', 'BIGINT', 'BIGINT'],
'impala::StringFunctions::Substring'],
# left and right are key words, leave them out for now.
[['strleft'], 'STRING', ['STRING', 'BIGINT'], 'impala::StringFunctions::Left'],
[['strright'], 'STRING', ['STRING', 'BIGINT'], 'impala::StringFunctions::Right'],
[['space'], 'STRING', ['BIGINT'], 'impala::StringFunctions::Space'],
[['repeat'], 'STRING', ['STRING', 'BIGINT'], 'impala::StringFunctions::Repeat'],
[['lpad'], 'STRING', ['STRING', 'BIGINT', 'STRING'], 'impala::StringFunctions::Lpad'],
[['rpad'], 'STRING', ['STRING', 'BIGINT', 'STRING'], 'impala::StringFunctions::Rpad'],
[['length'], 'INT', ['STRING'], 'impala::StringFunctions::Length'],
[['length'], 'INT', ['CHAR'], 'impala::StringFunctions::CharLength'],
[['char_length'], 'INT', ['STRING'], 'impala::StringFunctions::Length'],
[['character_length'], 'INT', ['STRING'], 'impala::StringFunctions::Length'],
[['lower', 'lcase'], 'STRING', ['STRING'], 'impala::StringFunctions::Lower'],
[['upper', 'ucase'], 'STRING', ['STRING'], 'impala::StringFunctions::Upper'],
[['initcap'], 'STRING', ['STRING'], 'impala::StringFunctions::InitCap'],
[['reverse'], 'STRING', ['STRING'], 'impala::StringFunctions::Reverse'],
[['translate'], 'STRING', ['STRING', 'STRING', 'STRING'],
'impala::StringFunctions::Translate'],
[['trim'], 'STRING', ['STRING'], 'impala::StringFunctions::Trim'],
[['ltrim'], 'STRING', ['STRING'], 'impala::StringFunctions::Ltrim'],
[['rtrim'], 'STRING', ['STRING'], 'impala::StringFunctions::Rtrim'],
[['ascii'], 'INT', ['STRING'], 'impala::StringFunctions::Ascii'],
[['instr'], 'INT', ['STRING', 'STRING'], 'impala::StringFunctions::Instr'],
[['locate'], 'INT', ['STRING', 'STRING'], 'impala::StringFunctions::Locate'],
[['locate'], 'INT', ['STRING', 'STRING', 'BIGINT'],
'impala::StringFunctions::LocatePos'],
[['regexp_extract'], 'STRING', ['STRING', 'STRING', 'BIGINT'],
'impala::StringFunctions::RegexpExtract',
'_ZN6impala15StringFunctions13RegexpPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions11RegexpCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['regexp_replace'], 'STRING', ['STRING', 'STRING', 'STRING'],
'impala::StringFunctions::RegexpReplace',
'_ZN6impala15StringFunctions13RegexpPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions11RegexpCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['concat'], 'STRING', ['STRING', '...'], 'impala::StringFunctions::Concat'],
[['concat_ws'], 'STRING', ['STRING', 'STRING', '...'],
'impala::StringFunctions::ConcatWs'],
[['find_in_set'], 'INT', ['STRING', 'STRING'], 'impala::StringFunctions::FindInSet'],
[['parse_url'], 'STRING', ['STRING', 'STRING'], 'impala::StringFunctions::ParseUrl',
'_ZN6impala15StringFunctions15ParseUrlPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions13ParseUrlCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['parse_url'], 'STRING', ['STRING', 'STRING', 'STRING'], 'impala::StringFunctions::ParseUrlKey',
'_ZN6impala15StringFunctions15ParseUrlPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions13ParseUrlCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
# Netezza compatibility char functions
[['chr'], 'STRING', ['INT'], 'impala::StringFunctions::Chr'],
[['btrim'], 'STRING', ['STRING'], 'impala::StringFunctions::Trim'],
[['btrim'], 'STRING', ['STRING', 'STRING'], 'impala::StringFunctions::BTrimString', '_ZN6impala15StringFunctions12BTrimPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE', '_ZN6impala15StringFunctions10BTrimCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
# Conditional Functions
# Some of these have empty symbols because the BE special-cases them based on the
# function name
[['if'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN', 'BOOLEAN'], ''],
[['if'], 'TINYINT', ['BOOLEAN', 'TINYINT', 'TINYINT'], ''],
[['if'], 'SMALLINT', ['BOOLEAN', 'SMALLINT', 'SMALLINT'], ''],
[['if'], 'INT', ['BOOLEAN', 'INT', 'INT'], ''],
[['if'], 'BIGINT', ['BOOLEAN', 'BIGINT', 'BIGINT'], ''],
[['if'], 'FLOAT', ['BOOLEAN', 'FLOAT', 'FLOAT'], ''],
[['if'], 'DOUBLE', ['BOOLEAN', 'DOUBLE', 'DOUBLE'], ''],
[['if'], 'STRING', ['BOOLEAN', 'STRING', 'STRING'], ''],
[['if'], 'TIMESTAMP', ['BOOLEAN', 'TIMESTAMP', 'TIMESTAMP'], ''],
[['if'], 'DECIMAL', ['BOOLEAN', 'DECIMAL', 'DECIMAL'], ''],
[['nullif'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''],
[['nullif'], 'TINYINT', ['TINYINT', 'TINYINT'], ''],
[['nullif'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''],
[['nullif'], 'INT', ['INT', 'INT'], ''],
[['nullif'], 'BIGINT', ['BIGINT', 'BIGINT'], ''],
[['nullif'], 'FLOAT', ['FLOAT', 'FLOAT'], ''],
[['nullif'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''],
[['nullif'], 'STRING', ['STRING', 'STRING'], ''],
[['nullif'], 'TIMESTAMP', ['TIMESTAMP', 'TIMESTAMP'], ''],
[['nullif'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''],
[['zeroifnull'], 'TINYINT', ['TINYINT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'SMALLINT', ['SMALLINT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'INT', ['INT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'BIGINT', ['BIGINT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'FLOAT', ['FLOAT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'DOUBLE', ['DOUBLE'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'DECIMAL', ['DECIMAL'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['nullifzero'], 'TINYINT', ['TINYINT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'SMALLINT', ['SMALLINT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'INT', ['INT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'BIGINT', ['BIGINT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'FLOAT', ['FLOAT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'DOUBLE', ['DOUBLE'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'DECIMAL', ['DECIMAL'], 'impala::ConditionalFunctions::NullIfZero'],
[['isnull', 'ifnull', 'nvl'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''],
[['isnull', 'ifnull', 'nvl'], 'TINYINT', ['TINYINT', 'TINYINT'], ''],
[['isnull', 'ifnull', 'nvl'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''],
[['isnull', 'ifnull', 'nvl'], 'INT', ['INT', 'INT'], ''],
[['isnull', 'ifnull', 'nvl'], 'BIGINT', ['BIGINT', 'BIGINT'], ''],
[['isnull', 'ifnull', 'nvl'], 'FLOAT', ['FLOAT', 'FLOAT'], ''],
[['isnull', 'ifnull', 'nvl'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''],
[['isnull', 'ifnull', 'nvl'], 'STRING', ['STRING', 'STRING'], ''],
[['isnull', 'ifnull', 'nvl'], 'TIMESTAMP', ['TIMESTAMP', 'TIMESTAMP'], ''],
[['isnull', 'ifnull', 'nvl'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''],
[['coalesce'], 'BOOLEAN', ['BOOLEAN', '...'], ''],
[['coalesce'], 'TINYINT', ['TINYINT', '...'], ''],
[['coalesce'], 'SMALLINT', ['SMALLINT', '...'], ''],
[['coalesce'], 'INT', ['INT', '...'], ''],
[['coalesce'], 'BIGINT', ['BIGINT', '...'], ''],
[['coalesce'], 'FLOAT', ['FLOAT', '...'], ''],
[['coalesce'], 'DOUBLE', ['DOUBLE', '...'], ''],
[['coalesce'], 'STRING', ['STRING', '...'], ''],
[['coalesce'], 'TIMESTAMP', ['TIMESTAMP', '...'], ''],
[['coalesce'], 'DECIMAL', ['DECIMAL', '...'], ''],
[['istrue'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsTrue'],
[['isnottrue'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsNotTrue'],
[['isfalse'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsFalse'],
[['isnotfalse'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsNotFalse'],
# Utility functions
[['current_database'], 'STRING', [], 'impala::UtilityFunctions::CurrentDatabase'],
[['user'], 'STRING', [], 'impala::UtilityFunctions::User'],
[['effective_user'], 'STRING', [], 'impala::UtilityFunctions::EffectiveUser'],
[['sleep'], 'BOOLEAN', ['INT'], 'impala::UtilityFunctions::Sleep'],
[['pid'], 'INT', [], 'impala::UtilityFunctions::Pid'],
[['version'], 'STRING', [], 'impala::UtilityFunctions::Version'],
[['typeOf'], 'STRING', ['BOOLEAN'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf10BooleanValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['TINYINT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf10TinyIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['SMALLINT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf11SmallIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['INT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['BIGINT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['FLOAT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf8FloatValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['DOUBLE'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9DoubleValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['CHAR'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['VARCHAR'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['STRING'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['TIMESTAMP'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf12TimestampValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['DECIMAL'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf10DecimalValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['TINYINT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf10TinyIntValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['SMALLINT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf11SmallIntValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['INT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf6IntValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['BIGINT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf9BigIntValEEES3_PNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['FLOAT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf8FloatValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['DOUBLE'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf9DoubleValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['STRING'],
'_ZN6impala16UtilityFunctions13FnvHashStringEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['fnv_hash'], 'BIGINT', ['TIMESTAMP'],
'_ZN6impala16UtilityFunctions16FnvHashTimestampEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['fnv_hash'], 'BIGINT', ['DECIMAL'],
'_ZN6impala16UtilityFunctions14FnvHashDecimalEPN10impala_udf15FunctionContextERKNS1_10DecimalValE'],
# (Non)NullValue functions
[['nullvalue'], 'BOOLEAN', ['BOOLEAN'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10BooleanValEEES3_PNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['TINYINT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10TinyIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['SMALLINT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf11SmallIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['INT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf6IntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['BIGINT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf9BigIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['FLOAT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf8FloatValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['DOUBLE'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf9DoubleValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['STRING'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf9StringValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['TIMESTAMP'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf12TimestampValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['DECIMAL'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10DecimalValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['BOOLEAN'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf10BooleanValEEES3_PNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['TINYINT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf10TinyIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['SMALLINT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf11SmallIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['INT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf6IntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['BIGINT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf9BigIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['FLOAT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf8FloatValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['DOUBLE'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf9DoubleValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['STRING'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf9StringValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['TIMESTAMP'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf12TimestampValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['DECIMAL'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf10DecimalValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
# Bit and Byte functions
# For functions corresponding to builtin operators, we can reuse the implementations
[['bitand'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Bitand_TinyIntVal_TinyIntVal'],
[['bitand'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Bitand_SmallIntVal_SmallIntVal'],
[['bitand'], 'INT', ['INT', 'INT'], 'impala::Operators::Bitand_IntVal_IntVal'],
[['bitand'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Bitand_BigIntVal_BigIntVal'],
[['bitor'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Bitor_TinyIntVal_TinyIntVal'],
[['bitor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Bitor_SmallIntVal_SmallIntVal'],
[['bitor'], 'INT', ['INT', 'INT'], 'impala::Operators::Bitor_IntVal_IntVal'],
[['bitor'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Bitor_BigIntVal_BigIntVal'],
[['bitxor'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Bitxor_TinyIntVal_TinyIntVal'],
[['bitxor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Bitxor_SmallIntVal_SmallIntVal'],
[['bitxor'], 'INT', ['INT', 'INT'], 'impala::Operators::Bitxor_IntVal_IntVal'],
[['bitxor'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Bitxor_BigIntVal_BigIntVal'],
[['bitnot'], 'TINYINT', ['TINYINT'], 'impala::Operators::Bitnot_TinyIntVal'],
[['bitnot'], 'SMALLINT', ['SMALLINT'], 'impala::Operators::Bitnot_SmallIntVal'],
[['bitnot'], 'INT', ['INT'], 'impala::Operators::Bitnot_IntVal'],
[['bitnot'], 'BIGINT', ['BIGINT'], 'impala::Operators::Bitnot_BigIntVal'],
[['countset'], 'INT', ['TINYINT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf10TinyIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['SMALLINT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf11SmallIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf6IntValEEES3_PNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['BIGINT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf9BigIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['TINYINT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf10TinyIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_RKS4_'],
[['countset'], 'INT', ['SMALLINT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf11SmallIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_RKS4_'],
[['countset'], 'INT', ['INT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf6IntValEEES3_PNS2_15FunctionContextERKT_RKS3_'],
[['countset'], 'INT', ['BIGINT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf9BigIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_RKS4_'],
[['getbit'], 'TINYINT', ['TINYINT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf10TinyIntValEEES3_PNS2_15FunctionContextERKT_RKNS2_6IntValE'],
[['getbit'], 'TINYINT', ['SMALLINT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf11SmallIntValEEENS2_10TinyIntValEPNS2_15FunctionContextERKT_RKNS2_6IntValE'],
[['getbit'], 'TINYINT', ['INT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf6IntValEEENS2_10TinyIntValEPNS2_15FunctionContextERKT_RKS3_'],
[['getbit'], 'TINYINT', ['BIGINT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf9BigIntValEEENS2_10TinyIntValEPNS2_15FunctionContextERKT_RKNS2_6IntValE'],
[['rotateleft'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateleft'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateleft'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateleft'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateright'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['rotateright'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['rotateright'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['rotateright'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['setbit'], 'TINYINT', ['TINYINT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValE'],
[['setbit'], 'SMALLINT', ['SMALLINT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValE'],
[['setbit'], 'INT', ['INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_RKS3_'],
[['setbit'], 'BIGINT', ['BIGINT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValE'],
[['setbit'], 'TINYINT', ['TINYINT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValESB_'],
[['setbit'], 'SMALLINT', ['SMALLINT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValESB_'],
[['setbit'], 'INT', ['INT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_RKS3_SA_'],
[['setbit'], 'BIGINT', ['BIGINT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValESB_'],
[['shiftleft'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftleft'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftleft'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftleft'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftright'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
[['shiftright'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
[['shiftright'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
[['shiftright'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
]
invisible_functions = [
[['months_add_interval'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_add_interval'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub_interval'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub_interval'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
]
|
|
#!/usr/bin/env python
'''
NAME
latex2png - Converts LaTeX source to PNG file
SYNOPSIS
latex2png [options] INFILE
DESCRIPTION
This filter reads LaTeX source text from the input file
    INFILE (or stdin if INFILE is -) and renders it to a PNG image file.
Typically used to render math equations.
Requires latex(1), dvipng(1) commands and LaTeX math packages.
OPTIONS
-D DPI
Set the output resolution to DPI dots per inch. Use this option to
scale the output image size.
-o OUTFILE
The file name of the output file. If not specified the output file is
named like INFILE but with a .png file name extension.
-m
        Skip if the PNG output file is newer than the INFILE.
Compares timestamps on INFILE and OUTFILE. If
INFILE is - (stdin) then compares MD5 checksum stored in file
named like OUTFILE but with a .md5 file name extension.
The .md5 file is created if the -m option is used and the
INFILE is - (stdin).
-v
Verbosely print processing information to stderr.
--help, -h
Print this documentation.
--version
Print program version number.
SEE ALSO
latex(1), dvipng(1)
AUTHOR
Written by Stuart Rackham, <srackham@gmail.com>
The code was inspired by Kjell Magne Fauske's code:
http://fauskes.net/nb/htmleqII/
See also:
http://www.amk.ca/python/code/mt-math
http://code.google.com/p/latexmath2png/
COPYING
Copyright (C) 2010 Stuart Rackham. Free use of this software is
granted under the terms of the MIT License.
'''
# Suppress warning: "the md5 module is deprecated; use hashlib instead"
import warnings
warnings.simplefilter('ignore',DeprecationWarning)
import os, sys, tempfile, md5
VERSION = '0.1.0'
# Include LaTeX packages and commands here.
TEX_HEADER = r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\newcommand{\mx}[1]{\mathbf{\bm{#1}}} % Matrix command
\newcommand{\vc}[1]{\mathbf{\bm{#1}}} % Vector command
\newcommand{\T}{\text{T}} % Transpose
\pagestyle{empty}
\begin{document}'''
TEX_FOOTER = r'''\end{document}'''
# Globals.
verbose = False
class EApp(Exception): pass # Application specific exception.
def print_stderr(line):
sys.stderr.write(line + os.linesep)
def print_verbose(line):
if verbose:
print_stderr(line)
def run(cmd):
global verbose
if verbose:
cmd += ' 1>&2'
else:
cmd += ' 2>%s 1>&2' % os.devnull
print_verbose('executing: %s' % cmd)
if os.system(cmd):
raise EApp, 'failed command: %s' % cmd
def latex2png(infile, outfile, dpi, modified):
'''Convert LaTeX input file infile to PNG file named outfile.'''
outfile = os.path.abspath(outfile)
outdir = os.path.dirname(outfile)
if not os.path.isdir(outdir):
raise EApp, 'directory does not exist: %s' % outdir
texfile = tempfile.mktemp(suffix='.tex', dir=os.path.dirname(outfile))
basefile = os.path.splitext(texfile)[0]
dvifile = basefile + '.dvi'
temps = [basefile + ext for ext in ('.tex','.dvi', '.aux', '.log')]
skip = False
if infile == '-':
tex = sys.stdin.read()
checksum = md5.new(tex).digest()
f = os.path.splitext(outfile)[0] + '.md5'
if modified:
if os.path.isfile(f) and os.path.isfile(outfile) and \
checksum == open(f,'rb').read():
skip = True
open(f,'wb').write(checksum)
else:
if not os.path.isfile(infile):
raise EApp, 'input file does not exist: %s' % infile
tex = open(infile).read()
if modified and os.path.isfile(outfile) and \
os.path.getmtime(infile) <= os.path.getmtime(outfile):
skip = True
if skip:
print_verbose('skipped: no change: %s' % outfile)
return
tex = '%s\n%s\n%s\n' % (TEX_HEADER, tex.strip(), TEX_FOOTER)
print_verbose('tex:\n%s' % tex)
open(texfile, 'w').write(tex)
saved_pwd = os.getcwd()
os.chdir(outdir)
try:
# Compile LaTeX document to DVI file.
run('latex %s' % texfile)
# Convert DVI file to PNG.
cmd = 'dvipng'
if dpi:
cmd += ' -D %s' % dpi
cmd += ' -T tight -x 1000 -z 9 -bg Transparent -o "%s" "%s"' \
% (outfile,dvifile)
run(cmd)
finally:
os.chdir(saved_pwd)
for f in temps:
if os.path.isfile(f):
print_verbose('deleting: %s' % f)
os.remove(f)
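# A minimal usage sketch (not part of the original filter), assuming a
# hypothetical equation.tex in the current directory: render it to
# equation.png at 120 DPI and skip the work when the PNG is already up to date.
def _example_render():
    latex2png('equation.tex', 'equation.png', dpi=120, modified=True)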
def usage(msg=''):
if msg:
print_stderr(msg)
print_stderr('\n'
'usage:\n'
' latex2png [options] INFILE\n'
'\n'
'options:\n'
' -D DPI\n'
' -o OUTFILE\n'
' -m\n'
' -v\n'
' --help\n'
' --version')
def main():
# Process command line options.
global verbose
dpi = None
outfile = None
modified = False
import getopt
opts,args = getopt.getopt(sys.argv[1:], 'D:o:mhv', ['help','version'])
for o,v in opts:
if o in ('--help','-h'):
print __doc__
sys.exit(0)
if o =='--version':
print('latex2png version %s' % (VERSION,))
sys.exit(0)
if o == '-D': dpi = v
if o == '-o': outfile = v
if o == '-m': modified = True
if o == '-v': verbose = True
if len(args) != 1:
usage()
sys.exit(1)
infile = args[0]
if dpi and not dpi.isdigit():
usage('invalid DPI')
sys.exit(1)
if outfile is None:
if infile == '-':
usage('OUTFILE must be specified')
sys.exit(1)
outfile = os.path.splitext(infile)[0] + '.png'
# Do the work.
latex2png(infile, outfile, dpi, modified)
# Print something to suppress asciidoc 'no output from filter' warnings.
if infile == '-':
sys.stdout.write(' ')
if __name__ == "__main__":
try:
main()
except SystemExit:
raise
except KeyboardInterrupt:
sys.exit(1)
except Exception, e:
print_stderr("%s: %s" % (os.path.basename(sys.argv[0]), str(e)))
sys.exit(1)
|
|
# Copyright 2012 Nebula, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from oslo.utils import timeutils
from keystoneclient.i18n import _
from keystoneclient import service_catalog
# gap, in seconds, to determine whether the given token is about to expire
STALE_TOKEN_DURATION = 30
class AccessInfo(dict):
"""Encapsulates a raw authentication token from keystone.
Provides helper methods for extracting useful values from that token.
"""
@classmethod
def factory(cls, resp=None, body=None, region_name=None, auth_token=None,
**kwargs):
"""Create AccessInfo object given a successful auth response & body
or a user-provided dict.
"""
# FIXME(jamielennox): Passing region_name is deprecated. Provide an
# appropriate warning.
auth_ref = None
if body is not None or len(kwargs):
if AccessInfoV3.is_valid(body, **kwargs):
if resp and not auth_token:
auth_token = resp.headers['X-Subject-Token']
# NOTE(jamielennox): these return AccessInfo because they
# already have auth_token installed on them.
if body:
if region_name:
body['token']['region_name'] = region_name
return AccessInfoV3(auth_token, **body['token'])
else:
return AccessInfoV3(auth_token, **kwargs)
elif AccessInfoV2.is_valid(body, **kwargs):
if body:
if region_name:
body['access']['region_name'] = region_name
auth_ref = AccessInfoV2(**body['access'])
else:
auth_ref = AccessInfoV2(**kwargs)
else:
raise NotImplementedError(_('Unrecognized auth response'))
else:
auth_ref = AccessInfoV2(**kwargs)
if auth_token:
auth_ref.auth_token = auth_token
return auth_ref
def __init__(self, *args, **kwargs):
super(AccessInfo, self).__init__(*args, **kwargs)
self.service_catalog = service_catalog.ServiceCatalog.factory(
resource_dict=self, region_name=self._region_name)
@property
def _region_name(self):
return self.get('region_name')
def will_expire_soon(self, stale_duration=None):
"""Determines if expiration is about to occur.
:returns: boolean : true if expiration is within the given duration
"""
stale_duration = (STALE_TOKEN_DURATION if stale_duration is None
else stale_duration)
norm_expires = timeutils.normalize_time(self.expires)
# (gyee) should we move auth_token.will_expire_soon() to timeutils
# instead of duplicating code here?
soon = (timeutils.utcnow() + datetime.timedelta(
seconds=stale_duration))
return norm_expires < soon
@classmethod
def is_valid(cls, body, **kwargs):
"""Determines if processing v2 or v3 token given a successful
auth body or a user-provided dict.
:returns: boolean : true if auth body matches implementing class
"""
raise NotImplementedError()
def has_service_catalog(self):
"""Returns true if the authorization token has a service catalog.
:returns: boolean
"""
raise NotImplementedError()
@property
def auth_token(self):
"""Returns the token_id associated with the auth request, to be used
in headers for authenticating OpenStack API requests.
:returns: str
"""
return self['auth_token']
@auth_token.setter
def auth_token(self, value):
self['auth_token'] = value
@auth_token.deleter
def auth_token(self):
try:
del self['auth_token']
except KeyError:
pass
@property
def expires(self):
"""Returns the token expiration (as datetime object)
:returns: datetime
"""
raise NotImplementedError()
@property
def issued(self):
"""Returns the token issue time (as datetime object)
:returns: datetime
"""
raise NotImplementedError()
@property
def username(self):
"""Returns the username associated with the authentication request.
Follows the pattern defined in the V2 API of first looking for 'name',
returning that if available, and falling back to 'username' if name
is unavailable.
:returns: str
"""
raise NotImplementedError()
@property
def user_id(self):
"""Returns the user id associated with the authentication request.
:returns: str
"""
raise NotImplementedError()
@property
def user_domain_id(self):
"""Returns the domain id of the user associated with the authentication
request.
For v2, it always returns 'default' which may be different from the
Keystone configuration.
:returns: str
"""
raise NotImplementedError()
@property
def user_domain_name(self):
"""Returns the domain name of the user associated with the
authentication request.
For v2, it always returns 'Default' which may be different from the
Keystone configuration.
:returns: str
"""
raise NotImplementedError()
@property
def role_ids(self):
"""Returns a list of role ids of the user associated with the
authentication request.
:returns: a list of strings of role ids
"""
raise NotImplementedError()
@property
def role_names(self):
"""Returns a list of role names of the user associated with the
authentication request.
:returns: a list of strings of role names
"""
raise NotImplementedError()
@property
def domain_name(self):
"""Returns the domain name associated with the authentication token.
:returns: str or None (if no domain associated with the token)
"""
raise NotImplementedError()
@property
def domain_id(self):
"""Returns the domain id associated with the authentication token.
:returns: str or None (if no domain associated with the token)
"""
raise NotImplementedError()
@property
def project_name(self):
"""Returns the project name associated with the authentication request.
:returns: str or None (if no project associated with the token)
"""
raise NotImplementedError()
@property
def tenant_name(self):
"""Synonym for project_name."""
return self.project_name
@property
def scoped(self):
"""Returns true if the authorization token was scoped to a tenant
(project), and contains a populated service catalog.
This is deprecated, use project_scoped instead.
:returns: bool
"""
raise NotImplementedError()
@property
def project_scoped(self):
"""Returns true if the authorization token was scoped to a tenant
(project).
:returns: bool
"""
raise NotImplementedError()
@property
def domain_scoped(self):
"""Returns true if the authorization token was scoped to a domain.
:returns: bool
"""
raise NotImplementedError()
@property
def trust_id(self):
"""Returns the trust id associated with the authentication token.
:returns: str or None (if no trust associated with the token)
"""
raise NotImplementedError()
@property
def trust_scoped(self):
"""Returns true if the authorization token was scoped as delegated in a
trust, via the OS-TRUST v3 extension.
:returns: bool
"""
raise NotImplementedError()
@property
def trustee_user_id(self):
"""Returns the trustee user id associated with a trust.
:returns: str or None (if no trust associated with the token)
"""
raise NotImplementedError()
@property
def trustor_user_id(self):
"""Returns the trustor user id associated with a trust.
:returns: str or None (if no trust associated with the token)
"""
raise NotImplementedError()
@property
def project_id(self):
"""Returns the project ID associated with the authentication
request, or None if the authentication request wasn't scoped to a
project.
:returns: str or None (if no project associated with the token)
"""
raise NotImplementedError()
@property
def tenant_id(self):
"""Synonym for project_id."""
return self.project_id
@property
def project_domain_id(self):
"""Returns the domain id of the project associated with the
authentication request.
        For v2, it returns 'default' if the token is project scoped and None
        otherwise; this hard-coded value may differ from the keystone
        configuration.
:returns: str
"""
raise NotImplementedError()
@property
def project_domain_name(self):
"""Returns the domain name of the project associated with the
authentication request.
        For v2, it returns 'Default' if the token is project scoped and None
        otherwise; this hard-coded value may differ from the keystone
        configuration.
:returns: str
"""
raise NotImplementedError()
@property
def auth_url(self):
"""Returns a tuple of URLs from publicURL and adminURL for the service
'identity' from the service catalog associated with the authorization
request. If the authentication request wasn't scoped to a tenant
(project), this property will return None.
DEPRECATED: this doesn't correctly handle region name. You should fetch
it from the service catalog yourself.
:returns: tuple of urls
"""
raise NotImplementedError()
@property
def management_url(self):
"""Returns the first adminURL for 'identity' from the service catalog
associated with the authorization request, or None if the
authentication request wasn't scoped to a tenant (project).
DEPRECATED: this doesn't correctly handle region name. You should fetch
it from the service catalog yourself.
:returns: tuple of urls
"""
raise NotImplementedError()
@property
def version(self):
"""Returns the version of the auth token from identity service.
:returns: str
"""
return self.get('version')
@property
def oauth_access_token_id(self):
"""Return the access token ID if OAuth authentication used.
:returns: str or None.
"""
raise NotImplementedError()
@property
def oauth_consumer_id(self):
"""Return the consumer ID if OAuth authentication used.
:returns: str or None.
"""
raise NotImplementedError()
@property
def is_federated(self):
"""Returns true if federation was used to get the token.
:returns: boolean
"""
raise NotImplementedError()
class AccessInfoV2(AccessInfo):
"""An object for encapsulating a raw v2 auth token from identity
service.
"""
def __init__(self, *args, **kwargs):
super(AccessInfo, self).__init__(*args, **kwargs)
self.update(version='v2.0')
self.service_catalog = service_catalog.ServiceCatalog.factory(
resource_dict=self,
token=self['token']['id'],
region_name=self._region_name)
@classmethod
def is_valid(cls, body, **kwargs):
if body:
return 'access' in body
elif kwargs:
return kwargs.get('version') == 'v2.0'
else:
return False
def has_service_catalog(self):
return 'serviceCatalog' in self
@AccessInfo.auth_token.getter
def auth_token(self):
try:
return super(AccessInfoV2, self).auth_token
except KeyError:
return self['token']['id']
@property
def expires(self):
return timeutils.parse_isotime(self['token']['expires'])
@property
def issued(self):
return timeutils.parse_isotime(self['token']['issued_at'])
@property
def username(self):
return self['user'].get('name', self['user'].get('username'))
@property
def user_id(self):
return self['user']['id']
@property
def user_domain_id(self):
return 'default'
@property
def user_domain_name(self):
return 'Default'
@property
def role_ids(self):
return self.get('metadata', {}).get('roles', [])
@property
def role_names(self):
return [r['name'] for r in self['user'].get('roles', [])]
@property
def domain_name(self):
return None
@property
def domain_id(self):
return None
@property
def project_name(self):
try:
tenant_dict = self['token']['tenant']
except KeyError:
pass
else:
return tenant_dict.get('name')
# pre grizzly
try:
return self['user']['tenantName']
except KeyError:
pass
# pre diablo, keystone only provided a tenantId
try:
return self['token']['tenantId']
except KeyError:
pass
@property
def scoped(self):
if ('serviceCatalog' in self
and self['serviceCatalog']
and 'tenant' in self['token']):
return True
return False
@property
def project_scoped(self):
return 'tenant' in self['token']
@property
def domain_scoped(self):
return False
@property
def trust_id(self):
return self.get('trust', {}).get('id')
@property
def trust_scoped(self):
return 'trust' in self
@property
def trustee_user_id(self):
return self.get('trust', {}).get('trustee_user_id')
@property
def trustor_user_id(self):
# this information is not available in the v2 token bug: #1331882
return None
@property
def project_id(self):
try:
tenant_dict = self['token']['tenant']
except KeyError:
pass
else:
return tenant_dict.get('id')
# pre grizzly
try:
return self['user']['tenantId']
except KeyError:
pass
# pre diablo
try:
return self['token']['tenantId']
except KeyError:
pass
@property
def project_domain_id(self):
if self.project_id:
return 'default'
@property
def project_domain_name(self):
if self.project_id:
return 'Default'
@property
def auth_url(self):
# FIXME(jamielennox): this is deprecated in favour of retrieving it
# from the service catalog. Provide a warning.
if self.service_catalog:
return self.service_catalog.get_urls(service_type='identity',
endpoint_type='publicURL',
region_name=self._region_name)
else:
return None
@property
def management_url(self):
# FIXME(jamielennox): this is deprecated in favour of retrieving it
# from the service catalog. Provide a warning.
if self.service_catalog:
return self.service_catalog.get_urls(service_type='identity',
endpoint_type='adminURL',
region_name=self._region_name)
else:
return None
@property
def oauth_access_token_id(self):
return None
@property
def oauth_consumer_id(self):
return None
@property
def is_federated(self):
return False
class AccessInfoV3(AccessInfo):
"""An object for encapsulating a raw v3 auth token from identity
service.
"""
def __init__(self, token, *args, **kwargs):
super(AccessInfo, self).__init__(*args, **kwargs)
self.update(version='v3')
self.service_catalog = service_catalog.ServiceCatalog.factory(
resource_dict=self,
token=token,
region_name=self._region_name)
if token:
self.auth_token = token
@classmethod
def is_valid(cls, body, **kwargs):
if body:
return 'token' in body
elif kwargs:
return kwargs.get('version') == 'v3'
else:
return False
def has_service_catalog(self):
return 'catalog' in self
@property
def is_federated(self):
return 'OS-FEDERATION' in self['user']
@property
def expires(self):
return timeutils.parse_isotime(self['expires_at'])
@property
def issued(self):
return timeutils.parse_isotime(self['issued_at'])
@property
def user_id(self):
return self['user']['id']
@property
def user_domain_id(self):
try:
return self['user']['domain']['id']
except KeyError:
if self.is_federated:
return None
raise
@property
def user_domain_name(self):
try:
return self['user']['domain']['name']
except KeyError:
if self.is_federated:
return None
raise
@property
def role_ids(self):
return [r['id'] for r in self.get('roles', [])]
@property
def role_names(self):
return [r['name'] for r in self.get('roles', [])]
@property
def username(self):
return self['user']['name']
@property
def domain_name(self):
domain = self.get('domain')
if domain:
return domain['name']
@property
def domain_id(self):
domain = self.get('domain')
if domain:
return domain['id']
@property
def project_id(self):
project = self.get('project')
if project:
return project['id']
@property
def project_domain_id(self):
project = self.get('project')
if project:
return project['domain']['id']
@property
def project_domain_name(self):
project = self.get('project')
if project:
return project['domain']['name']
@property
def project_name(self):
project = self.get('project')
if project:
return project['name']
@property
def scoped(self):
return ('catalog' in self and self['catalog'] and 'project' in self)
@property
def project_scoped(self):
return 'project' in self
@property
def domain_scoped(self):
return 'domain' in self
@property
def trust_id(self):
return self.get('OS-TRUST:trust', {}).get('id')
@property
def trust_scoped(self):
return 'OS-TRUST:trust' in self
@property
def trustee_user_id(self):
return self.get('OS-TRUST:trust', {}).get('trustee_user', {}).get('id')
@property
def trustor_user_id(self):
return self.get('OS-TRUST:trust', {}).get('trustor_user', {}).get('id')
@property
def auth_url(self):
# FIXME(jamielennox): this is deprecated in favour of retrieving it
# from the service catalog. Provide a warning.
if self.service_catalog:
return self.service_catalog.get_urls(service_type='identity',
endpoint_type='public',
region_name=self._region_name)
else:
return None
@property
def management_url(self):
# FIXME(jamielennox): this is deprecated in favour of retrieving it
# from the service catalog. Provide a warning.
if self.service_catalog:
return self.service_catalog.get_urls(service_type='identity',
endpoint_type='admin',
region_name=self._region_name)
else:
return None
@property
def oauth_access_token_id(self):
return self.get('OS-OAUTH1', {}).get('access_token_id')
@property
def oauth_consumer_id(self):
return self.get('OS-OAUTH1', {}).get('consumer_id')
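# A brief sketch (not part of the original module) of how AccessInfo.factory
# picks a token version: the is_valid() checks above key off the top-level
# 'access' (v2) or 'token' (v3) member. The trimmed bodies below are
# hypothetical examples of keystone responses.
def _example_detect_token_version():
    v2_body = {'access': {'token': {'id': 'abc123'}}}
    v3_body = {'token': {'expires_at': '2020-01-01T00:00:00.000000Z'}}
    assert AccessInfoV2.is_valid(v2_body) and not AccessInfoV3.is_valid(v2_body)
    assert AccessInfoV3.is_valid(v3_body) and not AccessInfoV2.is_valid(v3_body)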
|
|
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__author__ = "Microsoft Corporation <ptvshelp@microsoft.com>"
__version__ = "3.0.0.0"
from __future__ import absolute_import, print_function, with_statement
import ctypes
import datetime
import os
import re
import struct
import sys
import traceback
from xml.dom import minidom
try:
from cStringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread
__version__ = '3.0.0'
if sys.version_info[0] == 3:
def to_str(value):
return value.decode(sys.getfilesystemencoding())
else:
def to_str(value):
return value.encode(sys.getfilesystemencoding())
# http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S3
FCGI_VERSION_1 = 1
FCGI_HEADER_LEN = 8
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = "FCGI_MAX_CONNS"
FCGI_MAX_REQS = "FCGI_MAX_REQS"
FCGI_MPXS_CONNS = "FCGI_MPXS_CONNS"
class FastCgiRecord(object):
"""Represents a FastCgiRecord. Encapulates the type, role, flags. Holds
onto the params which we will receive and update later."""
def __init__(self, type, req_id, role, flags):
self.type = type
self.req_id = req_id
self.role = role
self.flags = flags
self.params = {}
def __repr__(self):
return '<FastCgiRecord(%d, %d, %d, %d)>' % (self.type,
self.req_id,
self.role,
self.flags)
#typedef struct {
# unsigned char version;
# unsigned char type;
# unsigned char requestIdB1;
# unsigned char requestIdB0;
# unsigned char contentLengthB1;
# unsigned char contentLengthB0;
# unsigned char paddingLength;
# unsigned char reserved;
# unsigned char contentData[contentLength];
# unsigned char paddingData[paddingLength];
#} FCGI_Record;
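# A small illustrative helper (not part of the original module): the 8-byte
# header described above is what read_fastcgi_record() unpacks with the
# '>BBHHBB' struct format. The request id and content length are hypothetical.
def _example_unpack_header():
    header = struct.pack('>BBHHBB', FCGI_VERSION_1, FCGI_BEGIN_REQUEST, 1, 8, 0, 0)
    # -> (version, type, requestId, contentLength, paddingLength, reserved)
    return struct.unpack('>BBHHBB', header)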
class _ExitException(Exception):
pass
if sys.version_info[0] >= 3:
# indexing into byte strings gives us an int, so
# ord is unnecessary on Python 3
def ord(x):
return x
def chr(x):
return bytes((x, ))
def wsgi_decode(x):
return x.decode('iso-8859-1')
def wsgi_encode(x):
return x.encode('iso-8859-1')
def fs_encode(x):
return x
def exception_with_traceback(exc_value, exc_tb):
return exc_value.with_traceback(exc_tb)
zero_bytes = bytes
else:
# Replace the builtin open with one that supports an encoding parameter
from codecs import open
def wsgi_decode(x):
return x
def wsgi_encode(x):
return x
def fs_encode(x):
return x if isinstance(x, str) else x.encode(sys.getfilesystemencoding())
def exception_with_traceback(exc_value, exc_tb):
# x.with_traceback() is not supported on 2.x
return exc_value
bytes = str
def zero_bytes(length):
return '\x00' * length
def read_fastcgi_record(stream):
"""reads the main fast cgi record"""
data = stream.read(8) # read record
if not data:
# no more data, our other process must have died...
raise _ExitException()
fcgi_ver, reqtype, req_id, content_size, padding_len, _ = struct.unpack('>BBHHBB', data)
content = stream.read(content_size) # read content
stream.read(padding_len)
if fcgi_ver != FCGI_VERSION_1:
raise Exception('Unknown fastcgi version %s' % fcgi_ver)
processor = REQUEST_PROCESSORS.get(reqtype)
if processor is not None:
return processor(stream, req_id, content)
# unknown type requested, send response
log('Unknown request type %s' % reqtype)
send_response(stream, req_id, FCGI_UNKNOWN_TYPE, chr(reqtype) + zero_bytes(7))
return None
def read_fastcgi_begin_request(stream, req_id, content):
"""reads the begin request body and updates our _REQUESTS table to include
the new request"""
# typedef struct {
# unsigned char roleB1;
# unsigned char roleB0;
# unsigned char flags;
# unsigned char reserved[5];
# } FCGI_BeginRequestBody;
# TODO: Ignore request if it exists
res = FastCgiRecord(
FCGI_BEGIN_REQUEST,
req_id,
(ord(content[0]) << 8) | ord(content[1]), # role
ord(content[2]), # flags
)
_REQUESTS[req_id] = res
def read_encoded_int(content, offset):
i = struct.unpack_from('>B', content, offset)[0]
if i < 0x80:
return offset + 1, i
return offset + 4, struct.unpack_from('>I', content, offset)[0] & ~0x80000000
def read_fastcgi_keyvalue_pairs(content, offset):
"""Reads a FastCGI key/value pair stream"""
offset, name_len = read_encoded_int(content, offset)
offset, value_len = read_encoded_int(content, offset)
name = content[offset:(offset + name_len)]
offset += name_len
value = content[offset:(offset + value_len)]
offset += value_len
return offset, name, value
def get_encoded_int(i):
"""Writes the length of a single name for a key or value in a key/value
stream"""
if i <= 0x7f:
return struct.pack('>B', i)
elif i < 0x80000000:
return struct.pack('>I', i | 0x80000000)
else:
raise ValueError('cannot encode value %s (%x) because it is too large' % (i, i))
def write_fastcgi_keyvalue_pairs(pairs):
"""Creates a FastCGI key/value stream and returns it as a byte string"""
parts = []
for raw_key, raw_value in pairs.items():
key = wsgi_encode(raw_key)
value = wsgi_encode(raw_value)
parts.append(get_encoded_int(len(key)))
parts.append(get_encoded_int(len(value)))
parts.append(key)
parts.append(value)
return bytes().join(parts)
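# A hedged round-trip sketch (not part of the original module): the two
# helpers above are symmetric, so an encoded pair can be read back directly.
# The parameter name and value are hypothetical.
def _example_keyvalue_roundtrip():
    encoded = write_fastcgi_keyvalue_pairs({'SCRIPT_NAME': '/app'})
    offset, name, value = read_fastcgi_keyvalue_pairs(encoded, 0)
    # name and value come back in wire encoding; wsgi_decode makes them str.
    return wsgi_decode(name), wsgi_decode(value)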
# Keys in this set will be stored in the record without modification but with a
# 'wsgi.' prefix. The original key will have the decoded version.
# (Following mod_wsgi from http://wsgi.readthedocs.org/en/latest/python3.html)
RAW_VALUE_NAMES = {
'SCRIPT_NAME' : 'wsgi.script_name',
'PATH_INFO' : 'wsgi.path_info',
'QUERY_STRING' : 'wsgi.query_string',
'HTTP_X_ORIGINAL_URL' : 'wfastcgi.http_x_original_url',
}
def read_fastcgi_params(stream, req_id, content):
if not content:
return None
offset = 0
res = _REQUESTS[req_id].params
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
name = wsgi_decode(name)
raw_name = RAW_VALUE_NAMES.get(name)
if raw_name:
res[raw_name] = value
res[name] = wsgi_decode(value)
def read_fastcgi_input(stream, req_id, content):
"""reads FastCGI std-in and stores it in wsgi.input passed in the
wsgi environment array"""
res = _REQUESTS[req_id].params
if 'wsgi.input' not in res:
res['wsgi.input'] = content
else:
res['wsgi.input'] += content
if not content:
# we've hit the end of the input stream, time to process input...
return _REQUESTS[req_id]
def read_fastcgi_data(stream, req_id, content):
"""reads FastCGI data stream and publishes it as wsgi.data"""
res = _REQUESTS[req_id].params
if 'wsgi.data' not in res:
res['wsgi.data'] = content
else:
res['wsgi.data'] += content
def read_fastcgi_abort_request(stream, req_id, content):
"""reads the wsgi abort request, which we ignore, we'll send the
finish execution request anyway..."""
pass
def read_fastcgi_get_values(stream, req_id, content):
"""reads the fastcgi request to get parameter values, and immediately
responds"""
offset = 0
request = {}
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
request[name] = value
response = {}
if FCGI_MAX_CONNS in request:
response[FCGI_MAX_CONNS] = '1'
if FCGI_MAX_REQS in request:
response[FCGI_MAX_REQS] = '1'
if FCGI_MPXS_CONNS in request:
response[FCGI_MPXS_CONNS] = '0'
send_response(
stream,
req_id,
FCGI_GET_VALUES_RESULT,
write_fastcgi_keyvalue_pairs(response)
)
# Our request processors for different FastCGI protocol requests. Only those
# requests that we receive are defined here.
REQUEST_PROCESSORS = {
FCGI_BEGIN_REQUEST : read_fastcgi_begin_request,
FCGI_ABORT_REQUEST : read_fastcgi_abort_request,
FCGI_PARAMS : read_fastcgi_params,
FCGI_STDIN : read_fastcgi_input,
FCGI_DATA : read_fastcgi_data,
FCGI_GET_VALUES : read_fastcgi_get_values
}
def log(txt):
"""Logs messages to a log file if WSGI_LOG env var is defined."""
log_file = os.environ.get('WSGI_LOG')
if log_file:
with open(log_file, 'a+', encoding='utf-8') as f:
txt = txt.replace('\r\n', '\n')
f.write('%s: %s%s' % (datetime.datetime.now(), txt, '' if txt.endswith('\n') else '\n'))
def maybe_log(txt):
"""Logs messages to a log file if WSGI_LOG env var is defined, and does not
raise exceptions if logging fails."""
try:
log(txt)
except:
pass
def send_response(stream, req_id, resp_type, content, streaming=True):
"""sends a response w/ the given id, type, and content to the server.
If the content is streaming then an empty record is sent at the end to
terminate the stream"""
if not isinstance(content, bytes):
raise TypeError("content must be encoded before sending: %r" % content)
offset = 0
while True:
len_remaining = max(min(len(content) - offset, 0xFFFF), 0)
data = struct.pack(
'>BBHHBB',
FCGI_VERSION_1, # version
resp_type, # type
req_id, # requestIdB1:B0
len_remaining, # contentLengthB1:B0
0, # paddingLength
0, # reserved
) + content[offset:(offset + len_remaining)]
offset += len_remaining
os.write(stream.fileno(), data)
if len_remaining == 0 or not streaming:
break
stream.flush()
def get_environment(dir):
web_config = os.path.join(dir, 'Web.config')
if not os.path.exists(web_config):
return {}
d = {}
doc = minidom.parse(web_config)
config = doc.getElementsByTagName('configuration')
for configSection in config:
appSettings = configSection.getElementsByTagName('appSettings')
for appSettingsSection in appSettings:
values = appSettingsSection.getElementsByTagName('add')
for curAdd in values:
key = curAdd.getAttribute('key')
value = curAdd.getAttribute('value')
if key and value is not None:
d[key.strip()] = value
return d
ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.c_uint32
ReadDirectoryChangesW.argtypes = [
ctypes.c_void_p, # HANDLE hDirectory
ctypes.c_void_p, # LPVOID lpBuffer
ctypes.c_uint32, # DWORD nBufferLength
ctypes.c_uint32, # BOOL bWatchSubtree
ctypes.c_uint32, # DWORD dwNotifyFilter
ctypes.POINTER(ctypes.c_uint32), # LPDWORD lpBytesReturned
ctypes.c_void_p, # LPOVERLAPPED lpOverlapped
ctypes.c_void_p # LPOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
]
try:
from _winapi import (CreateFile, CloseHandle, GetLastError, ExitProcess,
WaitForSingleObject, INFINITE, OPEN_EXISTING)
except ImportError:
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.restype = ctypes.c_void_p
CreateFile.argtypes = [
ctypes.c_wchar_p, # lpFilename
ctypes.c_uint32, # dwDesiredAccess
ctypes.c_uint32, # dwShareMode
ctypes.c_void_p, # LPSECURITY_ATTRIBUTES,
ctypes.c_uint32, # dwCreationDisposition,
ctypes.c_uint32, # dwFlagsAndAttributes,
ctypes.c_void_p # hTemplateFile
]
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [ctypes.c_void_p]
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.restype = ctypes.c_uint32
ExitProcess = ctypes.windll.kernel32.ExitProcess
ExitProcess.restype = ctypes.c_void_p
ExitProcess.argtypes = [ctypes.c_uint32]
WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
WaitForSingleObject.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
WaitForSingleObject.restype = ctypes.c_uint32
OPEN_EXISTING = 3
INFINITE = -1
FILE_LIST_DIRECTORY = 1
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
FILE_SHARE_DELETE = 0x00000004
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
MAX_PATH = 260
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x10
ERROR_NOTIFY_ENUM_DIR = 1022
INVALID_HANDLE_VALUE = 0xFFFFFFFF
class FILE_NOTIFY_INFORMATION(ctypes.Structure):
_fields_ = [('NextEntryOffset', ctypes.c_uint32),
('Action', ctypes.c_uint32),
('FileNameLength', ctypes.c_uint32),
('Filename', ctypes.c_wchar)]
_ON_EXIT_TASKS = None
def run_exit_tasks():
global _ON_EXIT_TASKS
maybe_log("Running on_exit tasks")
while _ON_EXIT_TASKS:
tasks, _ON_EXIT_TASKS = _ON_EXIT_TASKS, []
for t in tasks:
try:
t()
except Exception:
maybe_log("Error in exit task: " + traceback.format_exc())
def on_exit(task):
global _ON_EXIT_TASKS
if _ON_EXIT_TASKS is None:
_ON_EXIT_TASKS = tasks = []
try:
evt = int(os.getenv('_FCGI_SHUTDOWN_EVENT_'))
except (TypeError, ValueError):
maybe_log("Could not wait on event %s" % os.getenv('_FCGI_SHUTDOWN_EVENT_'))
else:
def _wait_for_exit():
WaitForSingleObject(evt, INFINITE)
run_exit_tasks()
ExitProcess(0)
start_new_thread(_wait_for_exit, ())
_ON_EXIT_TASKS.append(task)
def start_file_watcher(path, restart_regex):
if restart_regex is None:
restart_regex = ".*((\\.py)|(\\.config))$"
elif not restart_regex:
# restart regex set to empty string, no restart behavior
return
def enum_changes(path):
"""Returns a generator that blocks until a change occurs, then yields
the filename of the changed file.
Yields an empty string and stops if the buffer overruns, indicating that
too many files were changed."""
buffer = ctypes.create_string_buffer(32 * 1024)
bytes_ret = ctypes.c_uint32()
try:
the_dir = CreateFile(
path,
FILE_LIST_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
0,
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS,
0,
)
except OSError:
maybe_log("Unable to create watcher")
return
if not the_dir or the_dir == INVALID_HANDLE_VALUE:
maybe_log("Unable to create watcher")
return
while True:
ret_code = ReadDirectoryChangesW(
the_dir,
buffer,
ctypes.sizeof(buffer),
True,
FILE_NOTIFY_CHANGE_LAST_WRITE,
ctypes.byref(bytes_ret),
None,
None,
)
if ret_code:
cur_pointer = ctypes.addressof(buffer)
while True:
fni = ctypes.cast(cur_pointer, ctypes.POINTER(FILE_NOTIFY_INFORMATION))
# FileName is not null-terminated, so specifying length is mandatory.
filename = ctypes.wstring_at(cur_pointer + 12, fni.contents.FileNameLength // 2)
yield filename
if fni.contents.NextEntryOffset == 0:
break
cur_pointer = cur_pointer + fni.contents.NextEntryOffset
elif GetLastError() == ERROR_NOTIFY_ENUM_DIR:
CloseHandle(the_dir)
yield ''
return
else:
CloseHandle(the_dir)
return
log('wfastcgi.py will restart when files in %s are changed: %s' % (path, restart_regex))
def watcher(path, restart):
for filename in enum_changes(path):
if not filename:
log('wfastcgi.py exiting because the buffer was full')
run_exit_tasks()
ExitProcess(0)
elif restart.match(filename):
log('wfastcgi.py exiting because %s has changed, matching %s' % (filename, restart_regex))
# we call ExitProcess directly to quickly shutdown the whole process
# because sys.exit(0) won't have an effect on the main thread.
run_exit_tasks()
ExitProcess(0)
restart = re.compile(restart_regex)
start_new_thread(watcher, (path, restart))
def get_wsgi_handler(handler_name):
if not handler_name:
raise Exception('WSGI_HANDLER env var must be set')
if not isinstance(handler_name, str):
handler_name = to_str(handler_name)
module_name, _, callable_name = handler_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list = [(callable_name, should_call)]
handler = None
last_tb = ''
while module_name:
try:
handler = __import__(module_name, fromlist=[name_list[0][0]])
last_tb = ''
for name, should_call in name_list:
handler = getattr(handler, name)
if should_call:
handler = handler()
break
except ImportError:
module_name, _, callable_name = module_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list.insert(0, (callable_name, should_call))
handler = None
last_tb = ': ' + traceback.format_exc()
if handler is None:
raise ValueError('"%s" could not be imported%s' % (handler_name, last_tb))
return handler
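# A hedged usage sketch (not part of the original module): WSGI_HANDLER is a
# dotted path such as 'mysite.wsgi.application', or 'mysite.wsgi.create_app()'
# when the target is a factory that should be called. Both names here are
# hypothetical placeholders.
def _example_resolve_handler():
    return get_wsgi_handler(os.getenv('WSGI_HANDLER', 'mysite.wsgi.application'))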
def read_wsgi_handler(physical_path):
env = get_environment(physical_path)
os.environ.update(env)
for path in (v for k, v in env.items() if k.lower() == 'pythonpath'):
# Expand environment variables manually.
expanded_path = re.sub(
'%(\\w+?)%',
lambda m: os.getenv(m.group(1), ''),
path
)
sys.path.extend(fs_encode(p) for p in expanded_path.split(';') if p)
handler = get_wsgi_handler(os.getenv('WSGI_HANDLER'))
instr_key = env.get("APPINSIGHTS_INSTRUMENTATIONKEY")
if instr_key:
try:
# Attempt the import after updating sys.path- sites must
# include applicationinsights themselves.
from applicationinsights.requests import WSGIApplication
except ImportError:
maybe_log("Failed to import applicationinsights: " + traceback.format_exc())
pass
else:
handler = WSGIApplication(instr_key, handler)
# Ensure we will flush any remaining events when we exit
on_exit(handler.client.flush)
return env, handler
class handle_response(object):
"""A context manager for handling the response. This will ensure that
exceptions in the handler are correctly reported, and the FastCGI request is
properly terminated.
"""
def __init__(self, stream, record, get_output, get_errors):
self.stream = stream
self.record = record
self._get_output = get_output
self._get_errors = get_errors
self.error_message = ''
self.fatal_errors = False
self.physical_path = ''
self.header_bytes = None
self.sent_headers = False
def __enter__(self):
record = self.record
record.params['wsgi.input'] = BytesIO(record.params['wsgi.input'])
record.params['wsgi.version'] = (1, 0)
record.params['wsgi.url_scheme'] = 'https' if record.params.get('HTTPS', '').lower() == 'on' else 'http'
record.params['wsgi.multiprocess'] = True
record.params['wsgi.multithread'] = False
record.params['wsgi.run_once'] = False
self.physical_path = record.params.get('APPL_PHYSICAL_PATH', os.path.dirname(__file__))
if 'HTTP_X_ORIGINAL_URL' in record.params:
# We've been re-written for shared FastCGI hosting, so send the
# original URL as PATH_INFO.
record.params['PATH_INFO'] = record.params['HTTP_X_ORIGINAL_URL']
record.params['wsgi.path_info'] = record.params['wfastcgi.http_x_original_url']
# PATH_INFO is not supposed to include the query parameters, so remove them
record.params['PATH_INFO'] = record.params['PATH_INFO'].partition('?')[0]
record.params['wsgi.path_info'] = record.params['wsgi.path_info'].partition(wsgi_encode('?'))[0]
return self
def __exit__(self, exc_type, exc_value, exc_tb):
# Send any error message on FCGI_STDERR.
if exc_type and exc_type is not _ExitException:
error_msg = "%s:\n\n%s\n\nStdOut: %s\n\nStdErr: %s" % (
self.error_message or 'Error occurred',
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)),
self._get_output(),
self._get_errors(),
)
if not self.header_bytes or not self.sent_headers:
self.header_bytes = wsgi_encode('Status: 500 Internal Server Error\r\n')
self.send(FCGI_STDERR, wsgi_encode(error_msg))
# Best effort at writing to the log. It's more important to
# finish the response or the user will only see a generic 500
# error.
maybe_log(error_msg)
# End the request. This has to run in both success and failure cases.
self.send(FCGI_END_REQUEST, zero_bytes(8), streaming=False)
# Remove the request from our global dict
del _REQUESTS[self.record.req_id]
# Suppress all exceptions unless requested
return not self.fatal_errors
@staticmethod
def _decode_header(key, value):
if not isinstance(key, str):
key = wsgi_decode(key)
if not isinstance(value, str):
value = wsgi_decode(value)
return key, value
def start(self, status, headers, exc_info=None):
"""Starts sending the response. The response is ended when the context
manager exits."""
if exc_info:
try:
if self.sent_headers:
# We have to re-raise if we've already started sending data.
raise exception_with_traceback(exc_info[1], exc_info[2])
finally:
exc_info = None
elif self.header_bytes:
raise Exception('start_response has already been called')
if not isinstance(status, str):
status = wsgi_decode(status)
header_text = 'Status: %s\r\n' % status
if headers:
header_text += ''.join('%s: %s\r\n' % handle_response._decode_header(*i) for i in headers)
self.header_bytes = wsgi_encode(header_text + '\r\n')
return lambda content: self.send(FCGI_STDOUT, content)
def send(self, resp_type, content, streaming=True):
'''Sends part of the response.'''
if not self.sent_headers:
if not self.header_bytes:
raise Exception("start_response has not yet been called")
self.sent_headers = True
send_response(self.stream, self.record.req_id, FCGI_STDOUT, self.header_bytes)
self.header_bytes = None
return send_response(self.stream, self.record.req_id, resp_type, content, streaming)
_REQUESTS = {}
def main():
initialized = False
log('wfastcgi.py %s started' % __version__)
log('Python version: %s' % sys.version)
try:
fcgi_stream = sys.stdin.detach() if sys.version_info[0] >= 3 else sys.stdin
try:
import msvcrt
msvcrt.setmode(fcgi_stream.fileno(), os.O_BINARY)
except ImportError:
pass
while True:
record = read_fastcgi_record(fcgi_stream)
if not record:
continue
errors = sys.stderr = sys.__stderr__ = record.params['wsgi.errors'] = StringIO()
output = sys.stdout = sys.__stdout__ = StringIO()
with handle_response(fcgi_stream, record, output.getvalue, errors.getvalue) as response:
if not initialized:
log('wfastcgi.py %s initializing' % __version__)
os.chdir(response.physical_path)
sys.path[0] = '.'
# Initialization errors should be treated as fatal.
response.fatal_errors = True
response.error_message = 'Error occurred while reading WSGI handler'
env, handler = read_wsgi_handler(response.physical_path)
response.error_message = 'Error occurred starting file watcher'
start_file_watcher(response.physical_path, env.get('WSGI_RESTART_FILE_REGEX'))
response.error_message = ''
response.fatal_errors = False
log('wfastcgi.py %s initialized' % __version__)
initialized = True
os.environ.update(env)
# SCRIPT_NAME + PATH_INFO is supposed to be the full path
# (http://www.python.org/dev/peps/pep-0333/) but by default
# (http://msdn.microsoft.com/en-us/library/ms525840(v=vs.90).aspx)
# IIS is sending us the full URL in PATH_INFO, so we need to
# clear the script name here
if 'AllowPathInfoForScriptMappings' not in os.environ:
record.params['SCRIPT_NAME'] = ''
record.params['wsgi.script_name'] = wsgi_encode('')
# Send each part of the response to FCGI_STDOUT.
# Exceptions raised in the handler will be logged by the context
# manager and we will then wait for the next record.
result = handler(record.params, response.start)
try:
for part in result:
if part:
response.send(FCGI_STDOUT, part)
finally:
if hasattr(result, 'close'):
result.close()
except _ExitException:
pass
except Exception:
maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
except BaseException:
maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
raise
finally:
run_exit_tasks()
maybe_log('wfastcgi.py %s closed' % __version__)
def _run_appcmd(args):
from subprocess import check_call, CalledProcessError
if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
appcmd = sys.argv[1:]
else:
appcmd = [os.path.join(os.getenv('SystemRoot'), 'system32', 'inetsrv', 'appcmd.exe')]
if not os.path.isfile(appcmd[0]):
print('IIS configuration tool appcmd.exe was not found at', appcmd, file=sys.stderr)
return -1
args = appcmd + args
try:
return check_call(args)
except CalledProcessError as ex:
print('''An error occurred running the command:
%r
Ensure your user has sufficient privileges and try again.''' % args, file=sys.stderr)
return ex.returncode
def enable():
res = _run_appcmd([
"set", "config", "/section:system.webServer/fastCGI",
"/+[fullPath='" + sys.executable + "', arguments='" + __file__ + "', signalBeforeTerminateSeconds='30']"
])
if res == 0:
print('"%s|%s" can now be used as a FastCGI script processor' % (sys.executable, __file__))
return res
def disable():
res = _run_appcmd([
"set", "config", "/section:system.webServer/fastCGI",
"/-[fullPath='" + sys.executable + "', arguments='" + __file__ + "', signalBeforeTerminateSeconds='30']"
])
if res == 0:
print('"%s|%s" is no longer registered for use with FastCGI' % (sys.executable, __file__))
return res
if __name__ == '__main__':
main()
|
|
from glob import glob
import json
import pandas
import requests
import numpy
import pickle
import os
import re
home = os.environ["HOME"]
base = "%s/data/pubmed" %os.environ["LAB"]
outfolder = "%s/methods" %(base)
repo_folder = "%s/repos" %(base)
scripts = "%s/SCRIPT/python/repofish/analysis/methods" %(home)
if not os.path.exists(outfolder):
os.mkdir(outfolder)
files = glob("%s/*.json" %repo_folder)
# Take a look at repo urls to help parsing
urls = []
pmids = []
for f in files:
print "Adding %s to list" %(f)
result = json.load(open(f,'r'))
pubmed_paper = str(result["pmid"])
urls = urls + result["github"]
pmids = pmids + [pubmed_paper] * len(result["github"])
# How many?
len(numpy.unique(pmids))
# 4240
len(urls)
# 6135
# Save to inputs file
inputs = dict()
inputs["urls"] = urls
inputs["pmids"] = pmids
pickle.dump(inputs,open("%s/inputs.pkl" %outfolder,"wb"))
# inputs = pickle.load(open("%s/inputs.pkl" %outfolder,"rb"))
# Zip together
inputs = zip(inputs["pmids"],inputs["urls"])
# Remove links that are just to github
inputs = [x for x in inputs if not re.search("[http|https]://github.com$",x[1])]
inputs = [x for x in inputs if not re.search("[http|https]://www.github.com$",x[1])]
# Find urls that don't match pattern github.com/user/repo
needs_curation = [x for x in inputs if not re.search("[http|https]://(www.)?github.com/.*/.*$",x[1])]
inputs = [x for x in inputs if re.search("[http|https]://(www.)?github.com/.*/.*$",x[1])]
# We will need to parse links based on type
raw_files = []
gists = []
github_io = []
github_io_master = []
rest = []
nbviewer = []
users = []
github_help = []
while len(needs_curation) > 0:
element = needs_curation.pop()
# We've found a gist
if re.search("[http|https]://gist.github.com/.*$",element[1]):
# print "GIST: %s" %element[1]
gists.append(element)
# Github help
elif re.search("[http|https]://help.github.com/.*$",element[1]):
github_help.append(element)
# Github user main pages
elif re.search("[http|https]://github.com/.*$",element[1]):
users.append(element)
# nbviewer
elif re.search("nbviewer",element[1]):
nbviewer.append(element)
# We've found a raw file
elif re.search("[http|https]://raw.github.com/.*$",element[1]):
# print "RAW: %s" %element[1]
raw_files.append(element)
# github io address associated with repo
elif re.search("[http|https]://.*[.]github.io/.*$",element[1]):
github_io.append(element)
elif re.search("[http|https]://.*[.]github.com/.*$",element[1]):
github_io.append(element)
    # github io address for an entire user or organization (no specific repo)
elif re.search("[http|https]://.*[.]github.io$",element[1]):
github_io_master.append(element)
elif re.search("[http|https]://.*[.]github.com$",element[1]):
github_io_master.append(element)
# print "IO: %s" %element[1]
else:
rest.append(element)
# These are ready for parsing
urls = dict()
urls["raw_files"] = raw_files
urls["gists"] = gists # will parse later
urls["repos"] = inputs
urls["github_io"] = github_io_master # we can't obtain specific repos
urls["github_help"] = github_help
urls["github_users"] = users
urls["nbviewer"] = nbviewer # will parse later
# For github_io urls, we need to convert to repo url
while len(github_io) > 0:
element = github_io.pop()
print "Parsing %s, %s more to go!" %(element[1],len(github_io))
url = element[1].replace("http://","").replace("https://","")
user_name = url.split("/")[0].split(".")[0]
repo_name = url.split("/")[1]
new_url = "http://www.github.com/%s/%s" %(user_name,repo_name)
response = requests.get(new_url)
if response.status_code == 200:
element = (element[0],new_url)
urls["repos"].append(element)
else:
rest.append(element)
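# A hedged illustration (not part of the original analysis) of the io -> repo
# mapping applied in the loop above, using a made-up URL:
#   "http://someuser.github.io/somerepo/docs" ->
#   user_name = "someuser", repo_name = "somerepo" ->
#   "http://www.github.com/someuser/somerepo" (kept only if it returns 200)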
# Manual curation
len(rest)
# 134
urls["github_io"].append(('26110025', u'https://github.com/qsardb'))
urls["raw_files"].append(('22934238', u'http://imagejs.org/imagejs.js'))
false_hits = [('25071829', u'http://thenextweb.com/dd/2014/03/17/mozilla-science-lab-github-figshare-team-fix-citation-code-academia'),
('23448176', u'http://readwrite.com/2011/06/02/github-has-passed-sourceforge'),
('25940563', u'http://iphylo.blogspot.de/2013/04/time-to-put-taxonomy-into-github.html'),
('25995958', u'https://play.google.com/store/apps/details?id=com.github.browep.thinspo'),
('22291635', u'http://marciovm.com/i-want-a-github-of-science'),
('26836305', u'https://urldefense.proofpoint.com/v2/url?u=https-3A__github.com_dlaszlo88_eLIFE-2DNPM-5FNMRrelaxation&'),
('26981420', u'http://cole-trapnelllab.github.io/cufflinks/igenome_table/index.html'),
('26516857', u'http://msoon.github.io/powermonitor/PowerTool/doc/PowerMonitorManual.pdf'),
('24132163', u'http://cloud.github.com/downloads/hadley/ggplot2/guide-col.pdf'),
('24132163', u'http://cloud.github.com/downloads/hadley/ggplots2/guide-col.pdf'),
('26236402', u'http://atrichlewis42.github.io/synergy-maps'),
('26812047', u'http://guinea-ebov.github.io/code/files/sitreps/hebdo/SitRep_hebdo_Guinee_Semaine13_2015.pdf')
]
urls["false_hits"] = false_hits
rest = [('25849488', u'https://github.com/najoshi/sickle'),
('25984347', u'http://github.com/JackKelly/rfm_ecomanager_logger'),
('26560745', u'https://github.com/jyeatman/AFQ'),
('25559943', u'https://github.com/networkx/networkx'),
('25273974', u'http://github.com/gersteinlab/FunSeq2'),
('26783965', u'http://www.github.com/stschiff/sequenceTools'),
('26783965', u'http://www.github.com/stschiff/rarecoal'),
('26783965', u'http://www.github.com/stschiff/msmc'),
('22772437', u'https://www.github.com/tk2/RetroSeq'),
('23826173', u'https://github.com/michaelbarton/genomer'),
('23826173', u'https://github.com/michaelbarton/chromosome-pfluorescens-r124-plasmid'),
('23826173', u'https://github.com/michaelbarton/chromosome-pfluorescens-r124-genome'),
('23812995', u'http://github.com/kortschak/biogo'),
('25271284', u'https://github.com/picrust/picrust'),
('24324759', u'https://github.com/ekg/smithwaterman'),
('25977800', u'https://github.com/preprocessed-connectomes-project/quality-assessment-protocol'),
('25730631', u'https://github.com/ekg/freebayes'),
('26740918', u'https://github/jyeatman/AFQ'),
('26271043', u'https://github.com/jstjohn/SeqPrep'),
('26271043', u'https://github.com/ekg/vcflib'),
('26575292', u'https://github.com/broadinstitute/picard'),
('25385532', u'https://github.com/rneher/FitnessInference'),
('25875171', u'https://githubcom/najoshi/sickle'),
('24260458', u'https://github.com/picrust/picrust'),
('26628921', u'https://github.molgen.mpg.de/loosolab/admire'),
('26218351', u'https://github.com/McCRIBS/McCRIBS'),
('26089767', u'http://www.github.com/networkx/networkx'),
('25361575', u'https://github.com/molgenis/molgenis'),
('25932347', u'https://github.com/ntncmch/ebola_sierra_leone'),
('26973785', u'https://github.com/doxygen/doxygen'),
('26973785', u'https://github.com/doxygen/doxygen'),
('25549342', u'https://github.com/kinome/kinome.github.io'),
('26897027', u'https://github.com/charite/topodombar'),
('26400485', u'https://github.com/broadinstitute/picard'),
('26242175', u'https://github.com/broadinstitute/picard'),
('25306138', u'https://github.com/broadinstitute/picard'),
('26564201', u'https://github.com/broadinstitute/picard'),
('26496891', u'https://github.com/broadinstitute/picard'),
('25473421', u'https://github.com/broadinstitute/picard'),
('25888430', u'https://github.com/broadinstitute/picard'),
('26872740', u'https://github.com/broadinstitute/picard'),
('26444573', u'https://github.com/broadinstitute/picard'),
('25759012', u'https://github.com/broadinstitute/picard'),
('25859758', u'https://github.com/broadinstitute/picard'),
('26687620', u'https://github.com/broadinstitute/picard'),
('25505934', u'https://github.com/broadinstitute/picard'),
('26315209', u'https://github.com/broadinstitute/picard'),
('25164765', u'https://github.com/broadinstitute/picard'),
('25903198', u'https://github.com/broadinstitute/picard'),
('26527727', u'https://github.com/ENCODE-DCC/pyencoded-tools'),
('20106815', u'https://github.com/mz2/imotifs'),
('25339461', u'https://github.com/broadinstitute/picard'),
('26543846', u'https://github.com/broadinstitute/picard'),
('26125026', u'https://github.com/GMOD/jbrowse'),
('25859288', u'https://github.com/broadinstitute/picard'),
('25451469', u'https://github.com/aglatz/mineral-deposit-segmentation-pipeline'),
('26771513', u'https://github.com/guinea-ebov/guinea-ebov.github.io'),
('26728183', u'https://github.com/broadinstitute/picard'),
('26858705', u'https://github.com/broadinstitute/picard'),
('26510457', u'https://github.com/broadinstitute/picard'),
('26315624', u'https://github.com/broadinstitute/picard'),
('26040329', u'http://www.github.com/networkx/networkx'),
('25903370', u'https://github.com/broadinstitute/picard'),
('26882539', u'https://github.com/dnanexus/dx-toolkit'),
('26296237', u'https://github.com/articlemetrics/articlemetrics.github.io'),
('26296237', u'https://github.com/lagotto/pyalm'),
('25371702', u'https://github.com/lorisfichera/lorisfichera.github.com'),
('25157553', u'https://github.com/sensor2model-group/sensor2model'),
('26846686', u'https://github.com/EPICScotland/Broadwick'),
('25408304', u'https://github.com/SlicerIGT/LumpNav'),
('25892211', u'https://github.com/klusta-team/klustaviewa'),
('24612771', u'https://github.com/faircloth-lab/edittag'),
('26439627', u'https://github.com/dphenriksen/RegionDK'),
('26675891', u'https://github.com/uomsystemsbiology/epidermal_data'),
('23028546', u'https://github.com/FragIt/fragit-main'),
('23020243', u'https://github.com/mikejiang/BioC2015OpenCyto'),
('26671958', u'https://github.com/ntncmch/ebola_sierra_leone'),
('26413745', u'https://github.com/plaque2/plaque2.github.io'),
('26413745', u'https://github.com/plaque2/plaque2.github.io'),
('26751378', u'https://github.com/neurokernel/neurokernel'),
('26484246', u'https://github.com/broadinstitute/picard'),
('26727204', u'https://github.com/broadinstitute/picard'),
('26767617', u'https://github.com/broadinstitute/picard'),
('26136847', u'https://github.com/broadinstitute/picard'),
('26825632', u'https://github.com/broadinstitute/picard'),
('25395669', u'https://github.com/broadinstitute/picard'),
('26681494', u'https://github.com/broadinstitute/picard'),
('26557050', u'https://broadinstitute.github.io/picard'),
('26708082', u'https://github.com/broadinstitute/picard'),
('26061969', u'https://github.com/cytoscape/cytoscape.js'),
('25489744', u'https://github.com/informaton/padaco'),
('26114548', u'http://broadinstittute.github.io/picard'),
('26798323', u'https://github.com/Trinotate/Trinotate'),
('26753127', u'https://github.com/lh3/schemas'),
('24758346', u'https://github.com/daob/JruleMplus'),
('25653582', u'https://github.com/FCP-INDI/C-PAC'),
('21124986', u'https://github.com/A1kmm/sbasetram'),
('23741409', u'https://github.com/cjauvin/pypetree'),
('26053998', u'https://github.com/FCP-INDI/C-PAC'),
('25293757', u'https://github.com/codinghedgehog/phrecon'),
('26692761', u'https://github.com/cole-trapnell-lab/cufflinks'),
('26114585', u'http://broadinstittute.github.io/picard'),
('26262622', u'https://github.com/scipy-lectures/scipy-lecture-notes'),
('25887352', u'https://github.com/broadinstitute/picard'),
('26642925', u'https://github.com/broadinstitute/picard'),
('25239376', u'https://github.com/gemtools/gemtools-examples'),
('25853327', u'https://github.com/broadinstitute/picard'),
('26552596', u'https://github.com/broadinstitute/picard'),
('26327537', u'https://github.com/broadinstitute/picard'),
('26864517', u'https://github.com/broadinstitute/picard'),
('26834993', u'https://github.com/broadinstitute/picard'),
('26395405', u'https://github.com/broadinstitute/picard'),
('26076356', u'https://github.com/broadinstitute/picard'),
('26926343', u'https://github.com/broadinstitute/picard'),
('26149272', u'https://github.com/broadinstitute/picard'),
('25404257', u'https://github.com/broadinstitute/picard'),
('26579211', u'https://github.com/broadinstitute/picard'),
('26980001', u'https://github.com/broadinstitute/picard'),
('25924671', u'https://github.com/broadinstitute/picard'),
('26572163', u'https://github.com/broadinstitute/picard'),
('26289667', u'https://github.com/broadinstitute/picard'),
('25884497', u'https://github.com/cole-trapnell-lab/cufflinks'),
('20298518', u'https://github.com/asad/VFLib'),
('25462216', u'https://github.com/demotu/BMC')]
urls["repos"] = urls["repos"] + rest
pickle.dump(urls,open("%s/inputs_categorized.pkl" %outfolder,"wb"))
#urls = pickle.load(open("%s/inputs_categorized.pkl" %outfolder,"rb"))
jobfile = "%s/parse_repos.job" %(scripts)
filey = open(jobfile,'w')
seen = []
for repo in urls["repos"]:
repo_url = repo[1]
pubmed_paper = repo[0]
if repo_url not in seen:
seen.append(repo_url)
repo_name = repo_url.split("/")[-1]
repo_name = repo_name.strip(").").strip("(")
print repo_name
user_name = repo_url.split("/")[-2]
output_file = "%s/%s_%s_%s_functions.tsv" %(outfolder,user_name,repo_name,pubmed_paper)
if not os.path.exists(output_file):
filey.writelines("python %s/parse_imports.py %s %s %s\n" %(scripts, repo_url, output_file, pubmed_paper))
filey.close()
len(seen)
# 4408
for label,url_list in urls.iteritems():
print "count %s for %s" %(label,len(url_list))
# count repos for 5570
# count github_io for 183
# count nbviewer for 19
# count raw_files for 16
# count false_hits for 12
# count gists for 58
# count github_help for 8
# count github_users for 206
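# Hedged sketch (an assumption, not part of the original workflow): the job
# file written above could be executed serially as shown here; how the jobs
# are actually submitted (e.g. to a cluster scheduler) is not specified.
import subprocess
with open(jobfile) as job_lines:
    for command in job_lines:
        command = command.strip()
        if command:
            subprocess.check_call(command, shell=True)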
|
|
from unittest.mock import Mock
from io import BytesIO
import tarfile
import os
from tests.base_case import ChatBotTestCase
from chatterbot.trainers import UbuntuCorpusTrainer
class UbuntuCorpusTrainerTestCase(ChatBotTestCase):
"""
Test the Ubuntu Corpus trainer class.
"""
def setUp(self):
super().setUp()
self.trainer = UbuntuCorpusTrainer(
self.chatbot,
ubuntu_corpus_data_directory='./.ubuntu_test_data/',
show_training_progress=False
)
def tearDown(self):
super().tearDown()
self._remove_data()
def _get_data(self):
data1 = (
b'2004-11-04T16:49:00.000Z tom jane Hello\n'
b'2004-11-04T16:49:00.000Z tom jane Is anyone there?\n'
b'2004-11-04T16:49:00.000Z jane Yes\n'
b'\n'
)
data2 = (
b'2004-11-04T16:49:00.000Z tom jane Hello\n'
b'2004-11-04T16:49:00.000Z tom Is anyone there?\n'
b'2004-11-04T16:49:00.000Z jane Yes\n'
b'\n'
)
return data1, data2
def _remove_data(self):
"""
Clean up by removing the corpus data directory.
"""
import shutil
if os.path.exists(self.trainer.data_directory):
shutil.rmtree(self.trainer.data_directory)
def _create_test_corpus(self, data):
"""
Create a small tar in a similar format to the
Ubuntu corpus file in memory for testing.
"""
file_path = os.path.join(self.trainer.data_directory, 'ubuntu_dialogs.tgz')
tar = tarfile.TarFile(file_path, 'w')
tsv1 = BytesIO(data[0])
tsv2 = BytesIO(data[1])
tarinfo = tarfile.TarInfo('dialogs/3/1.tsv')
tarinfo.size = len(data[0])
tar.addfile(tarinfo, fileobj=tsv1)
tarinfo = tarfile.TarInfo('dialogs/3/2.tsv')
tarinfo.size = len(data[1])
tar.addfile(tarinfo, fileobj=tsv2)
tsv1.close()
tsv2.close()
tar.close()
return file_path
def _destroy_test_corpus(self):
"""
Remove the test corpus file.
"""
file_path = os.path.join(self.trainer.data_directory, 'ubuntu_dialogs.tgz')
if os.path.exists(file_path):
os.remove(file_path)
def _mock_get_response(self, *args, **kwargs):
"""
Return a requests.Response object.
"""
import requests
response = requests.Response()
response._content = b'Some response content'
response.headers['content-length'] = len(response.content)
return response
def test_download(self):
"""
Test the download function for the Ubuntu corpus trainer.
"""
import requests
requests.get = Mock(side_effect=self._mock_get_response)
download_url = 'https://example.com/download.tgz'
self.trainer.download(download_url, show_status=False)
file_name = download_url.split('/')[-1]
downloaded_file_path = os.path.join(self.trainer.data_directory, file_name)
requests.get.assert_called_with(download_url, stream=True)
self.assertTrue(os.path.exists(downloaded_file_path))
        # Remove the dummy downloaded file
os.remove(downloaded_file_path)
def test_download_file_exists(self):
"""
Test the case that the corpus file exists.
"""
import requests
file_path = os.path.join(self.trainer.data_directory, 'download.tgz')
open(file_path, 'a').close()
requests.get = Mock(side_effect=self._mock_get_response)
download_url = 'https://example.com/download.tgz'
self.trainer.download(download_url, show_status=False)
        # Remove the pre-created dummy file
os.remove(file_path)
self.assertFalse(requests.get.called)
def test_download_url_not_found(self):
"""
Test the case that the url being downloaded does not exist.
"""
self.skipTest('This test needs to be created.')
def test_extract(self):
"""
Test the extraction of text from a decompressed Ubuntu Corpus file.
"""
file_object_path = self._create_test_corpus(self._get_data())
self.trainer.extract(file_object_path)
self._destroy_test_corpus()
corpus_path = os.path.join(self.trainer.extracted_data_directory, 'dialogs', '3')
self.assertTrue(os.path.exists(self.trainer.extracted_data_directory))
self.assertTrue(os.path.exists(os.path.join(corpus_path, '1.tsv')))
self.assertTrue(os.path.exists(os.path.join(corpus_path, '2.tsv')))
def test_train(self):
"""
Test that the chat bot is trained using data from the Ubuntu Corpus.
"""
self._create_test_corpus(self._get_data())
self.trainer.train()
self._destroy_test_corpus()
response = self.chatbot.get_response('Is anyone there?')
self.assertEqual(response.text, 'Yes')
def test_train_sets_search_text(self):
"""
Test that the chat bot is trained using data from the Ubuntu Corpus.
"""
self._create_test_corpus(self._get_data())
self.trainer.train()
self._destroy_test_corpus()
results = list(self.chatbot.storage.filter(text='Is anyone there?'))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_text, 'VERB:anyone NOUN:there')
def test_train_sets_search_in_response_to(self):
"""
Test that the chat bot is trained using data from the Ubuntu Corpus.
"""
self._create_test_corpus(self._get_data())
self.trainer.train()
self._destroy_test_corpus()
results = list(self.chatbot.storage.filter(in_response_to='Is anyone there?'))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_in_response_to, 'VERB:anyone NOUN:there')
def test_is_extracted(self):
"""
        Test that it is possible to check whether the corpus has already been extracted.
"""
file_object_path = self._create_test_corpus(self._get_data())
self.trainer.extract(file_object_path)
extracted = self.trainer.is_extracted(self.trainer.extracted_data_directory)
self._destroy_test_corpus()
self.assertTrue(extracted)
def test_is_not_extracted(self):
"""
        Test that it is possible to check whether the corpus has already been extracted.
"""
self._remove_data()
extracted = self.trainer.is_extracted(self.trainer.extracted_data_directory)
self.assertFalse(extracted)
|
|
"""Compatible Relaxation"""
__docformat__ = "restructuredtext en"
import numpy
import scipy
from scipy.linalg import norm
from scipy.sparse import isspmatrix, csr_matrix, spdiags
from pyamg.relaxation import gauss_seidel
__all__ = ['CR','binormalize']
def CR(S, method='habituated',maxiter=20):
"""Use Compatible Relaxation to compute a C/F splitting
Parameters
----------
S : csr_matrix
sparse matrix (n x n) usually matrix A of Ax=b
method : {'habituated','concurrent'}
Method used during relaxation:
- concurrent: GS relaxation on F-points, leaving e_c = 0
- habituated: full relaxation, setting e_c = 0
maxiter : int
maximum number of outer iterations (lambda)
Returns
-------
splitting : array
C/F list of 1's (coarse pt) and 0's (fine pt) (n x 1)
References
----------
.. [1] Livne, O.E., "Coarsening by compatible relaxation."
Numer. Linear Algebra Appl. 11, No. 2-3, 205-227 (2004).
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.cr import CR
>>> A = poisson((20,20),format='csr')
>>> splitting = CR(A)
"""
# parameters (paper notation)
ntests = 3 # (nu) number of random tests to do per iteration
nrelax = 4 # (eta) number of relaxation sweeps per test
smagic = 1.0 # (s) parameter in [1,5] to account for fill-in
gamma = 1.5 # (gamma) cycle index. use 1.5 for 2d
G = 30 # (G) number of equivalence classes (# of bins)
tdepth = 1 # (t) drop depth on parse of L bins
delta = 0 # (delta) drop threshold on parse of L bins
alphai = 0.25 # (alpha_inc) quota increase
# initializations
alpha = 0.0 # coarsening ratio, quota
beta = numpy.inf # quality criterion
beta1 = numpy.inf # quality criterion, older
beta2 = numpy.inf # quality criterion, oldest
n=S.shape[0] # problem size
nC = 0 # number of current Coarse points
rhs = numpy.zeros((n,1)); # rhs for Ae=0
if not isspmatrix(S): raise TypeError('expecting sparse matrix')
S = binormalize(S)
splitting = numpy.zeros( (S.shape[0],1), dtype='intc' )
    # outer iterations ---------------
for m in range(0,maxiter):
mu = 0.0 # convergence rate
E = numpy.zeros((n,1)) # slowness measure
# random iterations ---------------
for k in range(0,ntests):
e = 0.5*( 1 + scipy.rand(n,1))
e[splitting>0] = 0
enorm = norm(e)
# relaxation iterations ---------------
for l in range(0,nrelax):
if method == 'habituated':
gauss_seidel(S,e,numpy.zeros((n,1)),iterations=1)
e[splitting>0]=0
elif method == 'concurrent':
raise NotImplementedError, 'not implemented: need an F-smoother'
else:
raise NotImplementedError, 'method not recognized: need habituated or concurrent'
enorm_old = enorm
enorm = norm(e)
if enorm <= 1e-14:
# break out of loops
ntests = k
nrelax = l
maxiter = m
# end relax
# check slowness
E = numpy.where( numpy.abs(e)>E, numpy.abs(e), E )
# update convergence rate
mu = mu + enorm/enorm_old
# end random tests
mu = mu/ntests
# work
alpha = float(nC)/n
W = (1 + (smagic-1)*gamma*alpha)/(1-gamma*alpha)
# quality criterion
beta2 = beta1
beta1 = beta
beta = numpy.power(max([mu, 0.1]), 1.0 / W)
# check if we're doing well
if (beta>beta1 and beta1>beta2) or m==(maxiter-1) or max(E)<1e-13:
return splitting.ravel()
# now add points
#
# update limit on additions to splitting (C)
if alpha < 1e-13:
alpha=0.25
else:
alpha = (1-alphai) * alpha + alphai * (1/gamma)
nCmax = numpy.ceil( alpha * n )
L = numpy.ceil( G * E / E.max() ).ravel()
binid=G
# add whole bins (and t-depth nodes) at a time
u = numpy.zeros((n,1))
# TODO This loop may never halt...
# Perhaps loop over nC < nCmax and binid > 0 ?
while nC < nCmax:
if delta > 0:
raise NotImplementedError
if tdepth != 1:
raise NotImplementedError
(roots,) = numpy.where(L==binid)
for root in roots:
if L[root]>=0:
cols = S[root,:].indices
splitting[root] = 1 # add roots
nC += 1
L[cols]=-1
binid -= 1
#L[troots] = -1 # mark t-rings visited
#u[:]=0.0
#u[roots] = 1.0
#for depth in range(0,tdepth):
# u = numpy.abs(S) * u
#(troots,tmp) = numpy.where(u>0)
return splitting.ravel()
def binormalize( A, tol=1e-5, maxiter=10):
"""Binormalize matrix A. Attempt to create unit l_1 norm rows.
Parameters
----------
A : csr_matrix
sparse matrix (n x n)
tol : float
tolerance
x : array
guess at the diagonal
maxiter : int
maximum number of iterations to try
Returns
-------
C : csr_matrix
diagonally scaled A, C=DAD
Notes
-----
- Goal: Scale A so that l_1 norm of the rows are equal to 1:
- B = DAD
- want row sum of B = 1
- easily done with tol=0 if B=DA, but this is not symmetric
- algorithm is O(N log (1.0/tol))
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import binormalize
>>> A = poisson((10,),format='csr')
>>> C = binormalize(A)
References
----------
.. [1] Livne, Golub, "Scaling by Binormalization"
Tech Report SCCM-03-12, SCCM, Stanford, 2003
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.1679
"""
if not isspmatrix(A):
raise TypeError('expecting sparse matrix A')
if A.dtype==complex:
raise NotImplementedError('complex A not implemented')
n = A.shape[0]
it = 0
x = numpy.ones((n,1)).ravel()
# 1.
B = A.multiply(A).tocsc() # power(A,2) inconsistent for numpy, scipy.sparse
d=B.diagonal().ravel()
# 2.
beta = B * x
betabar = (1.0/n) * numpy.dot(x,beta)
stdev = rowsum_stdev(x,beta)
#3
while stdev > tol and it < maxiter:
for i in range(0,n):
# solve equation x_i, keeping x_j's fixed
# see equation (12)
c2 = (n-1)*d[i]
c1 = (n-2)*(beta[i] - d[i]*x[i])
c0 = -d[i]*x[i]*x[i] + 2*beta[i]*x[i] - n*betabar
if (-c0 < 1e-14):
                print 'warning: matrix A is nearly un-binormalizable...'
return A
else:
# see equation (12)
xnew = (2*c0)/(-c1 - numpy.sqrt(c1*c1 - 4*c0*c2))
dx = xnew - x[i]
# here we assume input matrix is symmetric since we grab a row of B
# instead of a column
ii = B.indptr[i]
iii = B.indptr[i+1]
dot_Bcol = numpy.dot(x[B.indices[ii:iii]],B.data[ii:iii])
betabar = betabar + (1.0/n)*dx*(dot_Bcol + beta[i] + d[i]*dx)
beta[B.indices[ii:iii]] += dx*B.data[ii:iii]
x[i] = xnew
stdev = rowsum_stdev(x,beta)
it+=1
# rescale for unit 2-norm
d = numpy.sqrt(x)
D = spdiags( d.ravel(), [0], n,n)
C = D * A * D
C = C.tocsr()
beta = C.multiply(C).sum(axis=1)
scale = numpy.sqrt((1.0/n) * numpy.sum(beta))
return (1/scale)*C
def rowsum_stdev(x,beta):
"""Compute row sum standard deviation
Compute for approximation x, the std dev of the row sums
s(x) = ( 1/n \sum_k (x_k beta_k - betabar)^2 )^(1/2)
with betabar = 1/n dot(beta,x)
Parameters
----------
x : array
beta : array
Returns
-------
s(x)/betabar : float
Notes
-----
equation (7) in Livne/Golub
"""
n=x.size
betabar = (1.0/n) * numpy.dot(x,beta)
stdev = numpy.sqrt((1.0/n)*numpy.sum(numpy.power(numpy.multiply(x,beta) - betabar,2)))
return stdev/betabar
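# Hedged usage sketch, based on the doctests above: compute a C/F splitting
# for a small 2D Poisson problem and report the fraction of coarse points.
if __name__ == '__main__':
    from pyamg.gallery import poisson
    A = poisson((20, 20), format='csr')
    splitting = CR(A, method='habituated', maxiter=20)
    print('coarse fraction: %g' % (float(splitting.sum()) / A.shape[0]))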
|
|
from __future__ import unicode_literals
import os
import sys
import json
import time
import click
import errno
import shutil
import select
import tarfile
import zipfile
import hashlib
import tempfile
import sysconfig
import subprocess
from io import open
from contextlib import contextmanager
WIN = sys.platform.startswith('win')
FORMATS = ['tar.gz', 'tar.bz2', 'tar', 'zip', 'dir']
INSTALLER = '''\
#!/bin/bash
# This script installs the bundled wheel distribution of %(name)s into
# a provided path where it will end up in a new virtualenv.
set -e
show_usage() {
echo "Usage: ./install.sh [OPTIONS] DST"
}
show_help() {
show_usage
cat << EOF
Installs %(name)s into a new virtualenv that is provided as the DST
parameter. The interpreter to use for this virtualenv can be
overridden by the "-p" parameter.
Options:
--help display this help and exit.
-p --python PYTHON use an alternative Python interpreter
EOF
exit 0
}
param_error() {
show_usage
echo
echo "Error: $1"
exit 1
}
py="%(python)s"
while [ "$#" -gt 0 ]; do
case $1 in
--help) show_help ;;
-p|--python)
if [ "$#" -gt 1 ]; then
py="$2"
shift
else
param_error "$1 option requires an argument"
fi
;;
--python=?*) py=${1#*=} ;;
--) shift; break ;;
-?*) param_error "no such option: $1" ;;
*) break
esac
shift
done
if [ "$1" == "" ]; then
param_error "destination argument is required"
fi
HERE="$(cd "$(dirname "$0")"; pwd)"
DATA_DIR="$HERE/data"
# Ensure Python exists
command -v "$py" &> /dev/null || param_error "Given python interpreter not found ($py)"
echo 'Setting up virtualenv'
"$py" "$DATA_DIR/virtualenv.py" "$1"
VIRTUAL_ENV="$(cd "$1"; pwd)"
INSTALL_ARGS=''
if [ -f "$DATA_DIR/requirements.txt" ]; then
INSTALL_ARGS="$INSTALL_ARGS"\ -r\ "$DATA_DIR/requirements.txt"
fi
echo "Installing %(name)s"
"$VIRTUAL_ENV/bin/pip" install --pre --no-index \
--find-links "$DATA_DIR" wheel $INSTALL_ARGS %(pkg)s | grep -v '^$'
# Potential post installation
cd "$HERE"
. "$VIRTUAL_ENV/bin/activate"
%(postinstall)s
echo "Done."
'''
class Log(object):
def __init__(self):
self.indentation = 0
def indent(self):
self.indentation += 1
def outdent(self):
self.indentation -= 1
def echo(self, s):
prefix = ' ' * self.indentation
click.echo(prefix + s)
def info(self, fmt, *args, **kwargs):
self.echo(fmt.format(*args, **kwargs))
def error(self, fmt, *args, **kwargs):
return self.info('Error: ' + click.style(fmt, fg='red'),
*args, **kwargs)
def process_stream_output(self, process):
fds = set([process.stdout, process.stderr])
while fds:
for f in select.select(fds, [], [])[0]:
try:
line = f.readline()
if not line:
fds.discard(f)
continue
except OSError as e:
if e.errno != errno.EINTR:
raise
else:
line = line.decode('utf-8')
color = f == process.stdout and 'cyan' or 'yellow'
self.echo(click.style(line.rstrip(), fg=color))
@contextmanager
def indented(self):
self.indent()
try:
yield
finally:
self.outdent()
def autoquote(arg):
if arg.strip() not in (arg, '') or arg.split()[0] != arg or '"' in arg:
arg = '"%s"' % arg.replace('\\', '\\\\').replace('"', '\\"')
return arg
def find_exe(name):
"""Finds an executable first in the virtualenv if available, otherwise
falls back to the global name.
"""
if hasattr(sys, 'real_prefix'):
path = os.path.join(sys.prefix, 'bin', name)
if os.path.isfile(path):
return path
return name
def make_spec(pkg, version=None):
if version is None:
return pkg
if version[:1] in '>=':
return pkg + version
return '%s==%s' % (pkg, version)
def find_closest_package():
node = os.getcwd()
while 1:
if os.path.isfile(os.path.join(node, 'setup.py')):
return node
parent = os.path.dirname(node)
if node == parent:
break
node = parent
raise click.UsageError('Cannot discover package, you need to be explicit.')
def get_cache_dir(app_name):
if WIN:
folder = os.environ.get('LOCALAPPDATA')
if folder is None:
folder = os.path.expanduser('~')
app_name = '.' + app_name
return os.path.join(folder, app_name, 'Cache')
if sys.platform == 'darwin':
return os.path.join(os.path.expanduser(
'~/Library/Caches'), app_name)
return os.path.join(
os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.cache')),
app_name)
def get_default_wheel_cache():
return get_cache_dir('platter')
class Builder(object):
def __init__(self, log, path, output, python=None,
virtualenv_version=None, wheel_version=None,
pip_options=None, no_download=None, wheel_cache=None,
requirements=None):
self.log = log
self.path = os.path.abspath(path)
self.output = output
if python is None:
python = sys.executable
self.python = python
self.virtualenv_version = virtualenv_version
self.wheel_version = wheel_version
if wheel_cache is not None:
wheel_cache = os.path.abspath(wheel_cache)
self.wheel_cache = wheel_cache
if requirements is not None:
requirements = os.path.abspath(requirements)
self.requirements = requirements
self.no_download = no_download
self.pip_options = list(pip_options or ())
self.scratchpads = []
def get_pip_options(self):
rv = self.pip_options
if self.wheel_cache and os.path.isdir(self.wheel_cache):
rv = rv + ['-f', self.wheel_cache]
if self.no_download:
rv = rv + ['--no-index']
return rv
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.cleanup()
def make_scratchpad(self, name='generic'):
sp = tempfile.mkdtemp(suffix='-' + name)
self.scratchpads.append(sp)
self.log.info('Created scratchpad in {}', sp)
return sp
def execute(self, cmd, args=None, capture=False):
cmdline = [cmd]
cmdline.extend(args or ())
self.log.info('Executing {}', ' '.join(map(autoquote, cmdline)))
with self.log.indented():
cl = subprocess.Popen(cmdline, cwd=self.path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if capture:
rv = cl.communicate()[0]
else:
rv = None
self.log.process_stream_output(cl)
if cl.wait() != 0:
self.log.error('Failed to execute command "%s"' % cmd)
raise click.Abort()
if rv is not None:
return rv.decode('utf-8')
def cleanup(self):
while self.scratchpads:
sp = self.scratchpads.pop()
try:
self.log.info('Cleaning up scratchpad in {}', sp)
shutil.rmtree(sp)
except (OSError, IOError):
pass
def describe_package(self, python):
# Do dummy invoke first to trigger setup requires.
self.log.info('Invoking dummy setup to trigger requirements.')
self.execute(python, ['setup.py', '--version'], capture=True)
rv = self.execute(python, [
'setup.py', '--name', '--version', '--fullname'],
capture=True).strip().splitlines()
platform = sysconfig.get_platform()
if isinstance(platform, bytes):
platform = platform.decode('utf-8')
return {
'name': rv[0],
'version': rv[1],
'platform': platform,
'ident': rv[2],
}
def copy_file(self, filename, target):
if os.path.isdir(target):
target = os.path.join(target, os.path.basename(filename))
shutil.copy2(filename, target)
def place_venv_deps(self, venv_path, data_dir):
self.log.info('Placing virtualenv dependencies')
self.copy_file(os.path.join(venv_path, 'virtualenv.py'),
data_dir)
support_path = os.path.join(venv_path, 'virtualenv_support')
for filename in os.listdir(support_path):
if filename.endswith('.whl'):
self.copy_file(os.path.join(support_path, filename), data_dir)
def build_wheels(self, venv_path, data_dir):
self.log.info('Building wheels')
pip = os.path.join(venv_path, 'bin', 'pip')
with self.log.indented():
self.execute(pip, ['install', '--download', data_dir] +
self.get_pip_options() +
[make_spec('wheel', self.wheel_version)])
cmdline = ['wheel', '--wheel-dir=' + data_dir]
cmdline.extend(self.get_pip_options())
if self.requirements is not None:
cmdline.extend(('-r', self.requirements))
shutil.copy2(self.requirements,
os.path.join(data_dir, 'requirements.txt'))
cmdline.append(self.path)
self.execute(os.path.join(venv_path, 'bin', 'pip'), cmdline)
def setup_build_venv(self, virtualenv):
scratchpad = self.make_scratchpad('venv')
self.log.info('Initializing build virtualenv in {}', scratchpad)
with self.log.indented():
self.execute(self.python,
[os.path.join(virtualenv, 'virtualenv.py'),
scratchpad])
self.execute(os.path.join(scratchpad, 'bin', 'pip'),
['install'] + self.get_pip_options() +
[make_spec('wheel', self.wheel_version)])
return scratchpad
def put_installer(self, scratchpad, pkginfo, install_script_path):
fn = os.path.join(scratchpad, 'install.sh')
with open(install_script_path, encoding='utf-8') as f:
postinstall = f.read().rstrip()
with open(fn, 'w', encoding='utf-8') as f:
f.write((INSTALLER % dict(
name=pkginfo['ident'],
pkg=pkginfo['name'],
python=os.path.basename(self.python),
postinstall=postinstall,
)))
os.chmod(fn, 0o100755)
def put_meta_info(self, scratchpad, pkginfo):
self.log.info('Placing meta information')
with open(os.path.join(scratchpad, 'info.json'), 'w', encoding='utf-8') as f:
            f.write(json.dumps(pkginfo, indent=2) + '\n')
with open(os.path.join(scratchpad, 'VERSION'), 'w', encoding='utf-8') as f:
f.write(pkginfo['version'] + '\n')
with open(os.path.join(scratchpad, 'PLATFORM'), 'w', encoding='utf-8') as f:
f.write(pkginfo['platform'] + '\n')
with open(os.path.join(scratchpad, 'PACKAGE'), 'w', encoding='utf-8') as f:
f.write(pkginfo['name'] + '\n')
def create_archive(self, scratchpad, pkginfo, format):
base = pkginfo['ident'] + '-' + pkginfo['platform']
try:
os.makedirs(self.output)
except OSError:
pass
if format == 'dir':
rv_fn = os.path.join(self.output, base)
self.log.info('Saving artifact as directory {}', rv_fn)
os.rename(scratchpad, rv_fn)
return rv_fn
archive_name = base + '.' + format
rv_fn = os.path.join(self.output, archive_name)
tmp_fn = os.path.join(self.output, '.' + archive_name)
self.log.info('Creating distribution archive {}', rv_fn)
f = None
try:
if format in ('tar.gz', 'tar.bz2', 'tar'):
if '.' in format:
mode = 'w:' + format.split('.')[1]
else:
mode = 'w'
f = tarfile.open(tmp_fn, mode)
f.add(scratchpad, base)
f.close()
elif format == 'zip':
f = zipfile.ZipFile(tmp_fn, 'w')
for dirpath, dirnames, files in os.walk(scratchpad):
for file in files:
f.write(os.path.join(dirpath, file),
os.path.join(base, dirpath[
len(scratchpad) + 1:], file),
zipfile.ZIP_DEFLATED)
f.close()
os.rename(tmp_fn, rv_fn)
finally:
if f is not None:
f.close()
try:
os.remove(tmp_fn)
except OSError:
pass
return rv_fn
def extract_virtualenv(self):
self.log.info('Downloading and extracting virtualenv bootstrapper')
with self.log.indented():
scratchpad = self.make_scratchpad('venv-tmp')
self.execute(find_exe('pip'), ['install', '--download', scratchpad] +
self.get_pip_options() +
[make_spec('virtualenv', self.virtualenv_version)])
artifact = os.path.join(scratchpad, os.listdir(scratchpad)[0])
if artifact.endswith(('.zip', '.whl')):
f = zipfile.ZipFile(artifact)
else:
f = tarfile.open(artifact)
f.extractall(scratchpad)
f.close()
# We need to detect if we contain a single artifact that is a
# folder in which case we need to use that. Wheels for instance
# do not contain a wrapping folder.
artifacts = os.listdir(scratchpad)
if len(artifacts) == 1:
rv = os.path.join(scratchpad, artifacts[0])
if os.path.isdir(rv):
return rv, artifact
return scratchpad, artifact
def run_build_script(self, scratchpad, venv_path,
build_script, install_script_path):
self.log.info('Invoking build script {}', build_script)
with self.log.indented():
script = '''
. "%(venv)s/bin/activate"
export HERE="%(here)s"
export DATA_DIR="%(here)s/data"
export SOURCE_DIR="%(path)s"
export SCRATCHPAD="%(scratchpad)s"
%(script)s
''' % {
'venv': venv_path,
'script': os.path.abspath(build_script),
'path': self.path,
'here': scratchpad,
'scratchpad': self.make_scratchpad('postbuild'),
}
env = dict(os.environ)
env['INSTALL_SCRIPT'] = install_script_path
c = subprocess.Popen(['sh'], env=env, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=scratchpad)
c.stdin.write(script)
c.stdin.flush()
c.stdin.close()
self.log.process_stream_output(c)
if c.wait() != 0:
self.log.error('Build script failed :(')
raise click.Abort()
def update_wheel_cache(self, wheelhouse, venv_artifact):
self.log.info('Updating wheel cache')
def _place(filename):
basename = os.path.basename(filename)
if os.path.isfile(os.path.join(self.wheel_cache, basename)):
return
self.log.info('Caching {} for future use', basename)
shutil.copy2(filename, os.path.join(self.wheel_cache, basename))
with self.log.indented():
try:
os.makedirs(self.wheel_cache)
except OSError:
pass
for filename in os.listdir(wheelhouse):
if filename[:1] == '.' or not filename.endswith('.whl'):
continue
_place(os.path.join(wheelhouse, filename))
_place(venv_artifact)
def finalize(self, artifact, time):
self.log.info('Done.')
self.log.info('Total time elapsed: %.2fs' % time)
self.log.info('Build artifact successfully created.')
with self.log.indented():
self.log.info('Artifact: {}', artifact)
if not os.path.isfile(artifact):
return
sha1 = hashlib.sha1()
md5 = hashlib.md5()
with open(artifact, 'rb') as f:
while 1:
chunk = f.read(65536)
if not chunk:
break
sha1.update(chunk)
md5.update(chunk)
self.log.info('MD5: {}', md5.hexdigest())
self.log.info('SHA1: {}', sha1.hexdigest())
def build(self, format, prebuild_script=None, postbuild_script=None):
if not os.path.isdir(self.path):
raise click.UsageError('The project path (%s) does not exist'
% self.path)
now = time.time()
venv_src, venv_artifact = self.extract_virtualenv()
venv_path = self.setup_build_venv(venv_src)
local_python = os.path.join(venv_path, 'bin', 'python')
self.log.info('Analyzing package')
pkginfo = self.describe_package(local_python)
with self.log.indented():
self.log.info('Name: {}', pkginfo['name'])
self.log.info('Version: {}', pkginfo['version'])
scratchpad = self.make_scratchpad('buildbase')
data_dir = os.path.join(scratchpad, 'data')
os.makedirs(data_dir)
install_script_path = os.path.join(venv_path, 'install_script')
self.place_venv_deps(venv_src, data_dir)
if prebuild_script is not None:
self.run_build_script(scratchpad, venv_path, prebuild_script,
install_script_path)
self.build_wheels(venv_path, data_dir)
self.put_meta_info(scratchpad, pkginfo)
open(install_script_path, 'a').close()
if postbuild_script is not None:
self.run_build_script(scratchpad, venv_path, postbuild_script,
install_script_path)
if self.wheel_cache:
self.update_wheel_cache(data_dir, venv_artifact)
self.put_installer(scratchpad, pkginfo,
install_script_path)
artifact = self.create_archive(scratchpad, pkginfo, format)
self.cleanup()
self.finalize(artifact, time.time() - now)
@click.group(context_settings={
'auto_envvar_prefix': 'PLATTER'
})
@click.version_option()
def cli():
"""Platter packages up a Python package into a tarball that can install
into a local virtualenv through a bundled install script. The only
requirement on the destination host is a compatible Python installation.
To build a package with platter use run `platter build`:
$ platter build
This will look for the closest Python package. You can also be explicit
and provide the path to it:
$ platter build /path/to/the/project
"""
@cli.command('build')
@click.argument('path', required=False, type=click.Path())
@click.option('--output', type=click.Path(), default='dist',
help='The output folder', show_default=True)
@click.option('-p', '--python', type=click.Path(),
help='The python interpreter to use for building. This '
'interpreter is both used for compiling the packages and also '
'used as default in the generated install script.')
@click.option('--virtualenv-version', help='The version of virtualenv to use. '
'The default is to use the latest stable version from PyPI.',
metavar='SPEC')
@click.option('--pip-option', multiple=True, help='Adds an option to pip. To '
'add multiple options, use this parameter multiple times. '
'Example: --pip-option="--isolated"',
type=click.Path(), metavar='OPT')
@click.option('--wheel-version', help='The version of the wheel package '
'that should be used. Defaults to latest stable from PyPI.',
metavar='SPEC')
@click.option('--format', default='tar.gz', type=click.Choice(FORMATS),
help='The format of the resulting build artifact as file '
'extension. Supported formats: ' + ', '.join(FORMATS),
show_default=True, metavar='EXTENSION')
@click.option('--prebuild-script', type=click.Path(),
help='Path to an optional build script that is invoked in '
'the build folder as first step. This can be used to install '
'build dependencies such as Cython.')
@click.option('--postbuild-script', type=click.Path(),
help='Path to an optional build script that is invoked in '
'the build folder as last step. This can be used to inject '
'additional data into the archive.')
@click.option('--wheel-cache', type=click.Path(),
help='An optional folder where platter should cache wheels '
'instead of the system default. If you do not want to use '
'a wheel cache you can pass the --no-wheel-cache flag.')
@click.option('--no-wheel-cache', is_flag=True,
help='Disables the wheel cache entirely.')
@click.option('--no-download', is_flag=True,
help='Disables the downloading of all dependencies entirely. '
'This will only work if all dependencies have been previously '
'cached. This is primarily useful when you are temporarily '
'disconnected from the internet because it will disable useless '
'network roundtrips.')
@click.option('-r', '--requirements', type=click.Path(),
help='Optionally the path to a requirements file which contains '
'additional packages that should be installed in addition to '
'the main one. This can be useful when you need to pull in '
'optional dependencies.')
def build_cmd(path, output, python, virtualenv_version, wheel_version,
format, pip_option, prebuild_script, postbuild_script,
wheel_cache, no_wheel_cache, no_download, requirements):
"""Builds a platter package. The argument is the path to the package.
If not given it discovers the closest setup.py.
Generally this works by building the provided package into a wheel file
and a wheel for each of the dependencies. The resulting artifacts are
augmented with a virtualenv bootstrapper and an install script and then
archived. Optionally a post build script can be provided that can place
more files in the archive and also provide more install steps.
"""
log = Log()
if path is None:
path = find_closest_package()
log.info('Using package from {}', path)
if no_wheel_cache:
if no_download:
            raise click.UsageError('--no-download and --no-wheel-cache '
                                   'cannot be used together.')
wheel_cache = None
elif wheel_cache is None:
wheel_cache = get_default_wheel_cache()
if wheel_cache is not None:
log.info('Using wheel cache in {}', wheel_cache)
with Builder(log, path, output, python=python,
virtualenv_version=virtualenv_version,
wheel_version=wheel_version,
pip_options=list(pip_option),
no_download=no_download,
wheel_cache=wheel_cache,
requirements=requirements) as builder:
builder.build(format, prebuild_script=prebuild_script,
postbuild_script=postbuild_script)
@cli.command('clean-cache')
def clean_cache_cmd():
"""This command cleans the wheel cache.
This is useful when the cache got polluted with bad wheels due to a
bug or if the cache grew too large. Note that this only cleans the
wheel cache, it does not clean the download cache of pip.
"""
log = Log()
wheel_cache = get_default_wheel_cache()
log.info('Cleaning cache in {}', wheel_cache)
with log.indented():
if os.path.isdir(wheel_cache):
for fn in os.listdir(wheel_cache):
if os.path.isfile(os.path.join(wheel_cache, fn)):
try:
                        log.info('Removing {}', fn)
os.remove(os.path.join(wheel_cache, fn))
except OSError:
pass
log.info('Done')
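# Hedged addition: the packaged console script normally provides the
# `platter` entry point; this guard merely allows running the module
# directly during development.
if __name__ == '__main__':
    cli()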
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class WorkflowsOperations(object):
"""WorkflowsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2016-06-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-06-01"
self.config = config
def list_by_subscription(
self, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of workflows by subscription.
:param top: The number of items to be included in the result.
:type top: int
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkflowPaged <azure.mgmt.logic.models.WorkflowPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Logic/workflows'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkflowPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkflowPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of workflows by resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param top: The number of items to be included in the result.
:type top: int
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkflowPaged <azure.mgmt.logic.models.WorkflowPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkflowPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkflowPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Gets a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workflow', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, workflow_name, workflow, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param workflow: The workflow.
:type workflow: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(workflow, 'Workflow')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workflow', response)
if response.status_code == 201:
deserialized = self._deserialize('Workflow', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, workflow_name, workflow, custom_headers=None, raw=False, **operation_config):
"""Updates a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param workflow: The workflow.
:type workflow: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(workflow, 'Workflow')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workflow', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def disable(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Disables a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/disable'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enable(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Enables a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/enable'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def generate_upgraded_definition(
self, resource_group_name, workflow_name, target_schema_version=None, custom_headers=None, raw=False, **operation_config):
"""Generates the upgraded definition for a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param target_schema_version: The target schema version.
:type target_schema_version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.GenerateUpgradedDefinitionParameters(target_schema_version=target_schema_version)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/generateUpgradedDefinition'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'GenerateUpgradedDefinitionParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def validate(
self, resource_group_name, location, workflow_name, workflow, custom_headers=None, raw=False, **operation_config):
"""Validates the workflow definition.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param location: The workflow location.
:type location: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param workflow: The workflow definition.
:type workflow: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/locations/{location}/workflows/{workflowName}/validate'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(workflow, 'Workflow')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
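# A hedged usage sketch (not part of the generated file): invoking the
# two operations above from an azure.mgmt.logic management client. The
# 'workflows' attribute name is an assumption; the call signatures
# follow the method definitions above.
#
# client.workflows.validate('my-rg', 'westus', 'my-workflow', workflow)
# upgraded = client.workflows.generate_upgraded_definition(
#     'my-rg', 'my-workflow', target_schema_version='2016-06-01')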
|
|
# -*- coding: utf-8 -*-
#
# Unit tests for string functions from tdda.referencetest.checkfiles
#
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import unittest
from tdda.referencetest.checkfiles import FilesComparison
class TestInternals(unittest.TestCase):
def test_diff_marker(self):
compare = FilesComparison()
self.assertEqual(compare.diff_marker('ABC', 'XYZ'), '(ABC|XYZ)')
self.assertEqual(compare.diff_marker('ABC:', 'ABC: yes'), 'ABC:(| yes)')
self.assertEqual(compare.diff_marker('', 'AAA'), '(|AAA)')
self.assertEqual(compare.diff_marker('AAA', ''), '(AAA|)')
self.assertEqual(compare.diff_marker('ABC', 'AXC'), 'A(B|X)C')
def test_single_pattern(self):
compare = FilesComparison()
cpatterns = compare.compile_patterns(['gr.*t'])
self.assertTrue(compare.check_patterns(cpatterns,
'great', 'grapefruit'))
def test_unanchored_patterns(self):
compare = FilesComparison()
cpatterns = compare.compile_patterns([r'A\d{2}B', 'X[a-z]+Y'])
for actual, expected in [('A22BC', 'A99BC'),
('XappleY', 'XtrafficY'),
('A22BXappleY', 'A99BXtrafficY'),
('froggyA22BXappleY', 'froggyA99BXtrafficY'),
('frA22BXappleYoggy', 'frA99BXtrafficYoggy'),
('A22BA99B', 'A99BA22B')]:
self.assertTrue(compare.check_patterns(cpatterns,
actual, expected),
'%s <--> %s' % (actual, expected))
for actual, expected in [('A222BC', 'A99BC'),
('222BC', 'A99BC'),
('XappleYXappleY', 'XappleY')]:
self.assertFalse(compare.check_patterns(cpatterns,
actual, expected),
'%s <--> %s' % (actual, expected))
def test_anchored_patterns(self):
compare = FilesComparison()
cpatterns = compare.compile_patterns([r'^\d+$'])
for actual, expected in [('2', '222222222222'),
('2', '2'),
('02', '2'),
('2', '23'),
('123', '564')]:
self.assertTrue(compare.check_patterns(cpatterns,
actual, expected),
'%s <--> %s' % (actual, expected))
for actual, expected in [('2', '222222222222a22'),
('', '23'),
('123', '')]:
self.assertFalse(compare.check_patterns(cpatterns,
actual, expected),
'%s <--> %s' % (actual, expected))
def test_grouped_pattern(self):
compare = FilesComparison()
cpatterns = compare.compile_patterns(['(a|an) (grapefruit|apple)'])
self.assertTrue(compare.check_patterns(cpatterns,
'a grapefruit', 'an apple'))
self.assertTrue(compare.check_patterns(cpatterns,
'I have a grapefruit', 'I have an apple'))
self.assertTrue(compare.check_patterns(cpatterns,
'I have a grapefruit and an apple',
'I have an apple and a grapefruit'))
self.assertFalse(compare.check_patterns(cpatterns,
'I have a grapefruit and a banana',
'I have an apple and a grapefruit'))
class TestStrings(unittest.TestCase):
def test_strings_ok(self):
compare = FilesComparison()
self.assertEqual(compare.check_strings([], []), (0, []))
self.assertEqual(compare.check_strings(['abc'], ['abc']), (0, []))
self.assertEqual(compare.check_strings(['ab', 'c'], ['ab', 'c']),
(0, []))
def test_strings_fail(self):
compare = FilesComparison()
self.assertEqual(compare.check_strings([], ['x'], create_temporaries=False),
(1, ['Strings have different numbers of lines, '
'differences start at end of actual string',
'No files available for comparison']))
self.assertEqual(compare.check_strings(['y'], ['x'], create_temporaries=False),
(1, ['1 line is different, starting at line 1',
'No files available for comparison']))
def test_print(self):
msgs = []
compare = FilesComparison(print_fn=lambda x: msgs.append(x))
compare.check_strings(['a'], ['b'], create_temporaries=False)
self.assertEqual(msgs, ['1 line is different, starting at line 1',
'No files available for comparison'])
def test_strip(self):
compare = FilesComparison()
self.assertEqual(compare.check_strings([' abc'], ['abc'],
create_temporaries=False),
(1, ['1 line is different, starting at line 1',
'No files available for comparison']))
self.assertEqual(compare.check_strings([' abc'], ['abc'],
lstrip=True), (0, []))
self.assertEqual(compare.check_strings(['abc '], ['abc'],
rstrip=True), (0, []))
self.assertEqual(compare.check_strings([' abc '], ['abc'],
lstrip=True, rstrip=True),
(0, []))
def test_ignore_substrings(self):
compare = FilesComparison()
self.assertEqual(compare.check_strings(['abc','red', 'banana'],
['abc','blue', 'grapefruit'],
create_temporaries=False),
(1, ['2 lines are different, starting at line 2',
'No files available for comparison']))
self.assertEqual(compare.check_strings(['abc','blue', 'banana'],
['abc','red', 'grapefruit'],
ignore_substrings=['re'],
create_temporaries=False),
(1, ['1 line is different, starting at line 3',
'No files available for comparison',
'Note exclusions:',
' ignore_substrings:',
' re']))
self.assertEqual(compare.check_strings(['abc','red', 'banana'],
['abc','blue', 'grapefruit'],
ignore_substrings=['ue','gra']),
(0, []))
def test_ignore_patterns(self):
compare = FilesComparison()
# red != blue, banana != grapefruit => 2 failures
self.assertEqual(compare.check_strings(['abc','red', 'banana'],
['abc','blue', 'grapefruit'],
create_temporaries=False),
(1, ['2 lines are different, starting at line 2',
'No files available for comparison']))
# red != blue, banana !~ gr.*t => 2 failures
self.assertEqual(compare.check_strings(['abc','red', 'banana'],
['abc','blue', 'grapefruit'],
ignore_patterns=['gr.*t'],
create_temporaries=False),
(1, ['2 lines are different, starting at line 2',
'No files available for comparison',
'Note exclusions:',
' ignore_patterns:',
' gr.*t']))
# red != blue, but great DOES ~ gr.*t => 1 failure
self.assertEqual(compare.check_strings(['abc','red', 'great'],
['abc','blue', 'grapefruit'],
ignore_patterns=['gr.*t'],
create_temporaries=False),
(1, ['1 line is different, starting at line 2',
'No files available for comparison',
'Note exclusions:',
' ignore_patterns:',
' gr.*t']))
# spangle DOES ~ sp......, and breadfruit DOES ~ .*fruit => success
self.assertEqual(compare.check_strings(['abc','spangle', 'breadfruit'],
['abc','spanner', 'grapefruit'],
ignore_patterns=[
'sp.....',
'[bg].*fruit'
]),
(0, []))
def test_preprocess(self):
compare = FilesComparison()
def strip_first_five(strings):
return [s[5:] for s in strings]
def strip_first_seven(strings):
return [s[7:] for s in strings]
self.assertEqual(compare.check_strings(['abc','spangle', 'breadfruit'],
['abc','spanner', 'grapefruit'],
preprocess=strip_first_five,
create_temporaries=False),
(1, ['1 line is different, starting at line 2',
'No files available for comparison']))
self.assertEqual(compare.check_strings(['abc','spangle', 'breadfruit'],
['abc','spanner', 'grapefruit'],
preprocess=strip_first_seven),
(0, []))
def test_permutations(self):
compare = FilesComparison()
self.assertEqual(compare.check_strings(['abc','spangle', 'spanner'],
['spangle','spanner', 'abc'],
max_permutation_cases=1,
create_temporaries=False),
(1, ['3 lines are different, starting at line 1',
'No files available for comparison']))
self.assertEqual(compare.check_strings(['abc','spangle', 'spanner'],
['abc','spanner', 'spangle'],
max_permutation_cases=2),
(0, []))
self.assertEqual(compare.check_strings(['abc','spangle', 'spanner'],
['spangle','spanner', 'abc'],
max_permutation_cases=3),
(0, []))
if __name__ == '__main__':
unittest.main()
|
|
"""
"""
import unittest
from freezegun import freeze_time
import json
import datetime
from flask import Flask
from flask.ext.restful import fields
from flask.ext.restful import marshal
from flask.ext.restful.fields import MarshallingException
from acmapi.fields import Date
from acmapi.fields import root_fields
from acmapi.fields import event_fields
from acmapi.fields import post_fields
from acmapi.fields import person_fields
from acmapi.fields import membership_fields
from acmapi.fields import officership_fields
import acmapi
from acmapi import models
from acmapi import resources
from acmapi import DB
from acmapi.resources import API
from acmapi.models import Person
from acmapi.models import Officership
import base64
HEADERS={
'Authorization': 'Basic ' + base64.b64encode("root:1234")
}
class test_events_resource(unittest.TestCase):
@freeze_time("2012-01-14 12:00:01")
def setUp(self):
self.app = acmapi.create_app(SQLALCHEMY_DATABASE_URI='sqlite://')
self.app.testing = True
with self.app.test_request_context():
DB.create_all()
person = Person.create(
name = None,
username = 'root',
email = None,
website = None,
password = '1234',
)
DB.session.add(person)
DB.session.commit()
officership = Officership.create(
person = person,
title = 'Vice Chair',
start_date = datetime.date.today(),
end_date = None,
)
DB.session.add(person)
DB.session.add(officership)
DB.session.commit()
@freeze_time("2012-01-14 12:00:01")
def test_add_valid_event(self):
self.maxDiff = None
with self.app.test_client() as client:
response = client.post(
'http://localhost:5000/people/',
headers = HEADERS,
data = {
'username': 'bob',
'name': 'Bob Billy',
'email': 'bbob@example.com',
'website': 'http://bbob.example.com',
'password': 'password1234',
})
response = client.post(
'http://localhost:5000/events/',
headers = HEADERS,
data = {
'title': 'Title A',
'description': 'Description A',
'location': 'Location A',
'speaker': 'Speaker A',
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
})
self.assertEqual(
json.loads(response.data),
{
'title': 'Title A',
'description': 'Description A',
'location': 'Location A',
'speaker': 'Speaker A',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 1,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
})
@freeze_time("2012-01-14 12:00:01")
def test_edit_valid_event(self):
with self.app.test_client() as client:
response = client.post(
'http://localhost:5000/people/',
headers = HEADERS,
data = {
'username': 'bob',
'name': 'Bob Billy',
'email': 'bbob@example.com',
'website': 'http://bbob.example.com',
'password': 'password1234',
})
response = client.post(
'http://localhost:5000/events/',
headers = HEADERS,
data = {
'title': 'Title A',
'description': 'Description A',
'location': 'Location A',
'speaker': 'Speaker A',
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
})
response = client.put(
'http://localhost:5000/events/1',
headers = HEADERS,
data = {
'title': 'Title B',
'description': 'Description B',
'location': 'Location B',
'speaker': 'Speaker B',
})
self.assertEqual(
json.loads(response.data),
{
'title': 'Title B',
'description': 'Description B',
'location': 'Location B',
'speaker': 'Speaker B',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 2,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
})
response = client.get(
'http://localhost:5000/events/1')
self.assertEqual(
json.loads(response.data),
{
'page': 1,
'pagesize': 10,
'nextpage': None,
'events': [
{
'title': 'Title A',
'description': 'Description A',
'location': 'Location A',
'speaker': 'Speaker A',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 1,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
},{
'title': 'Title B',
'description': 'Description B',
'location': 'Location B',
'speaker': 'Speaker B',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 2,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
}
]
})
@freeze_time("2012-01-14 12:00:01")
def test_multiple_events_with_multiple_revisions(self):
with self.app.test_client() as client:
response = client.post(
'http://localhost:5000/people/',
headers = HEADERS,
data = {
'username': 'bob',
'name': 'Bob Billy',
'email': 'bbob@example.com',
'website': 'http://bbob.example.com',
'password': 'password1234',
})
response = client.post(
'http://localhost:5000/events/',
headers = HEADERS,
data = {
'title': 'Title A',
'description': 'Description A',
'location': 'Location A',
'speaker': 'Speaker A',
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
})
response = client.put(
'http://localhost:5000/events/1',
headers = HEADERS,
data = {
'title': 'Title B',
'description': 'Description B',
'location': 'Location B',
'speaker': 'Speaker B',
})
response = client.post(
'http://localhost:5000/events/',
headers = HEADERS,
data = {
'title': 'Title C',
'description': 'Description C',
'location': 'Location C',
'speaker': 'Speaker C',
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
})
response = client.put(
'http://localhost:5000/events/2',
headers = HEADERS,
data = {
'title': 'Title D',
'description': 'Description D',
'location': 'Location D',
'speaker': 'Speaker D',
})
response = client.get(
'http://localhost:5000/events/1')
self.assertEqual(
json.loads(response.data),
{
'page': 1,
'pagesize': 10,
'nextpage': None,
'events': [
{
'title': 'Title A',
'description': 'Description A',
'location': 'Location A',
'speaker': 'Speaker A',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 1,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
},{
'title': 'Title B',
'description': 'Description B',
'location': 'Location B',
'speaker': 'Speaker B',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 2,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
}
]
})
response = client.get(
'http://localhost:5000/events/2')
self.assertEqual(
json.loads(response.data),
{
'page': 1,
'pagesize': 10,
'nextpage': None,
'events': [
{
'title': 'Title C',
'description': 'Description C',
'location': 'Location C',
'speaker': 'Speaker C',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 2,
"revision": 1,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
},{
'title': 'Title D',
'description': 'Description D',
'location': 'Location D',
'speaker': 'Speaker D',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 2,
"revision": 2,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
}
]
})
response = client.get(
'http://localhost:5000/events/')
self.assertEqual(
json.loads(response.data),
{
'page': 1,
'pagesize': 10,
'nextpage': None,
'events': [
{
'title': 'Title B',
'description': 'Description B',
'location': 'Location B',
'speaker': 'Speaker B',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 1,
"revision": 2,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
},{
'title': 'Title D',
'description': 'Description D',
'location': 'Location D',
'speaker': 'Speaker D',
"edited_at": '2012-01-14 12:00:01.000000',
"editor": "http://localhost:5000/people/1",
"editor_id": 1,
"hidden": False,
"canceled": False,
"event_id": 2,
"revision": 2,
'start': '2014-10-10 10:10:10.000000',
'end': '2014-10-10 11:10:10.000000',
}
]
})
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
(Original author Wei Wu) by Antti-Pekka Hynninen
"Flexible Layout" (fl) version created by Dick Carter.
Implementing the original resnet ILSVRC 2015 winning network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
'''
import mxnet as mx
import numpy as np
import random
# Transform a symbol from one layout to another, or do nothing if they have the same layout
def transform_layout(data, from_layout, to_layout):
supported_layouts = ['NCHW', 'NHWC']
if from_layout not in supported_layouts:
raise ValueError('Not prepared to handle layout: {}'.format(from_layout))
if to_layout not in supported_layouts:
raise ValueError('Not prepared to handle layout: {}'.format(to_layout))
# Insert transpose if from_layout and to_layout don't match
if from_layout == 'NCHW' and to_layout == 'NHWC':
return mx.sym.transpose(data, axes=(0, 2, 3, 1))
elif from_layout == 'NHWC' and to_layout == 'NCHW':
return mx.sym.transpose(data, axes=(0, 3, 1, 2))
else:
return data
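# A hedged illustration (not from the original source): converting a
# symbol from NCHW to NHWC inserts a single transpose, and converting
# to the same layout is a no-op.
#
# data = mx.sym.Variable('data')                 # assume NCHW input
# nhwc = transform_layout(data, 'NCHW', 'NHWC')  # transpose(0, 2, 3, 1)
# same = transform_layout(data, 'NCHW', 'NCHW')  # returns data unchanged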
# A BatchNorm wrapper that responds to the input layout
def batchnorm(data, io_layout, batchnorm_layout, **kwargs):
# Transpose as needed to batchnorm_layout
transposed_as_needed = transform_layout(data, io_layout, batchnorm_layout)
bn_axis = 3 if batchnorm_layout == 'NHWC' else 1
batchnormed = mx.sym.BatchNorm(data=transposed_as_needed, axis=bn_axis, **kwargs)
# Transpose back to i/o layout as needed
return transform_layout(batchnormed, batchnorm_layout, io_layout)
# A BatchNormAddRelu wrapper that responds to the input layout
def batchnorm_add_relu(data, addend, io_layout, batchnorm_layout, **kwargs):
# Transpose as needed to batchnorm_layout
transposed_data_as_needed = transform_layout(data, io_layout, batchnorm_layout)
transposed_addend_as_needed = transform_layout(addend, io_layout, batchnorm_layout)
bn_axis = 3 if batchnorm_layout == 'NHWC' else 1
batchnormed = mx.sym.BatchNormAddRelu(data=transposed_data_as_needed,
addend=transposed_addend_as_needed,
axis=bn_axis, **kwargs)
# Transpose back to i/o layout as needed
return transform_layout(batchnormed, batchnorm_layout, io_layout)
# A Pooling wrapper that responds to the input layout
def pooling(data, io_layout, pooling_layout, **kwargs):
# Pooling kernel, as specified by pooling_layout, may be in conflict with i/o layout.
transposed_as_needed = transform_layout(data, io_layout, pooling_layout)
pooled = mx.sym.Pooling(data=transposed_as_needed, layout=pooling_layout, **kwargs)
# Transpose back to i/o layout as needed
return transform_layout(pooled, pooling_layout, io_layout)
# Assumption is that data comes in and out in the 'conv_layout' format.
# If this format is different from the 'batchnorm_layout' format, then the batchnorm() routine
# will introduce transposes on both sides of the mx.sym.BatchNorm symbol
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True,
workspace=256, memonger=False, conv_layout='NCHW', batchnorm_layout='NCHW',
verbose=False, cudnn_bn_off=False, bn_eps=2e-5, bn_mom=0.9, conv_algo=-1,
fuse_bn_relu=False, fuse_bn_add_relu=False, cudnn_tensor_core_only=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
stride : tuple
Stride used in convolution
dim_match : Boolean
True if the number of channels matches between input and output, False otherwise
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
act = 'relu' if fuse_bn_relu else None
if bottle_neck:
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn2', cudnn_off=cudnn_bn_off, act_type=act)
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2') if not fuse_bn_relu else bn2
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return batchnorm_add_relu(data=conv3, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off)
else:
bn3 = batchnorm(data=conv3, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off)
return mx.sym.Activation(data=bn3 + shortcut, act_type='relu', name=name + '_relu3')
else:
raise NotImplementedError
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, workspace=256, dtype='float32', memonger=False,
input_layout='NCHW', conv_layout='NCHW', batchnorm_layout='NCHW', pooling_layout='NCHW', verbose=False,
cudnn_bn_off=False, bn_eps=2e-5, bn_mom=0.9, conv_algo=-1,
fuse_bn_relu=False, fuse_bn_add_relu=False, force_tensor_core=False, use_dali=True, label_smoothing = 0.0):
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
Output size of the symbol
workspace : int
Workspace used in convolution operator
dtype : str
Precision (float32 or float16)
memonger : boolean
Activates "memory monger" to reduce the model's memory footprint
input_layout : str
interpretation (e.g. NCHW vs NHWC) of data provided by the i/o pipeline (may introduce transposes
if in conflict with 'conv_layout' below)
conv_layout : str
interpretation (e.g. NCHW vs NHWC) of data for convolution operation.
batchnorm_layout : str
directs which kernel performs the batchnorm (may introduce transposes if in conflict with 'conv_layout' above)
pooling_layout : str
directs which kernel performs the pooling (may introduce transposes if in conflict with 'conv_layout' above)
"""
act = 'relu' if fuse_bn_relu else None
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
if not use_dali:
# double buffering of data
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
(nchannel, height, width) = image_shape
# Insert transpose as needed to get the input layout to match the desired processing layout
data = transform_layout(data, input_layout, conv_layout)
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace, layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=force_tensor_core)
# Is this BatchNorm supposed to be here?
body = batchnorm(data=body, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name='bn0', cudnn_off=cudnn_bn_off)
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace, layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=force_tensor_core)
body = batchnorm(data=body, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name='bn0', cudnn_off=cudnn_bn_off, act_type=act)
if not fuse_bn_relu:
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = pooling(data=body, io_layout=conv_layout, pooling_layout=pooling_layout,
kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1),
bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger, conv_layout=conv_layout, batchnorm_layout=batchnorm_layout,
verbose=verbose, cudnn_bn_off=cudnn_bn_off, bn_eps=bn_eps, bn_mom=bn_mom,
conv_algo=conv_algo, fuse_bn_relu=fuse_bn_relu, fuse_bn_add_relu=fuse_bn_add_relu,
cudnn_tensor_core_only=force_tensor_core)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger, conv_layout=conv_layout, batchnorm_layout=batchnorm_layout,
verbose=verbose, cudnn_bn_off=cudnn_bn_off, bn_eps = bn_eps, bn_mom=bn_mom,
conv_algo=conv_algo, fuse_bn_relu=fuse_bn_relu, fuse_bn_add_relu=fuse_bn_add_relu,
cudnn_tensor_core_only=force_tensor_core)
# Although the kernel is ignored when global_pool=True, we must still supply one
pool1 = pooling(data=body, io_layout=conv_layout, pooling_layout=pooling_layout,
global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1', cublas_algo_verbose=verbose)
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
##########################################################################
# MXNet computes Cross Entropy loss gradients without explicitly computing
# the value of loss function.
# Take a look here:
# https://mxnet.incubator.apache.org/api/python/symbol/symbol.html#mxnet.symbol.SoftmaxOutput
# for further details
##########################################################################
return mx.sym.SoftmaxOutput(data=fc1, name='softmax', smooth_alpha=label_smoothing)
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=512, dtype='float32',
input_layout='NCHW', conv_layout='NCHW', batchnorm_layout='NCHW', pooling_layout='NCHW',
verbose=False, seed=None, cudnn_bn_off=False, batchnorm_eps=2e-5, batchnorm_mom=0.9,
conv_algo=-1, fuse_bn_relu=False, fuse_bn_add_relu=False, force_tensor_core=False, use_dali=True, label_smoothing = 0.0, **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
(Original author Wei Wu) by Antti-Pekka Hynninen
Implementing the original resnet ILSVRC 2015 winning network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 28:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnet(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype,
input_layout = input_layout,
conv_layout = conv_layout,
batchnorm_layout = batchnorm_layout,
pooling_layout = pooling_layout,
verbose = verbose,
cudnn_bn_off = cudnn_bn_off,
bn_eps = batchnorm_eps,
bn_mom = batchnorm_mom,
conv_algo = conv_algo,
fuse_bn_relu = fuse_bn_relu,
fuse_bn_add_relu = fuse_bn_add_relu,
force_tensor_core = force_tensor_core,
use_dali = use_dali,
label_smoothing = label_smoothing)
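# A hedged usage sketch (not part of the original file): building a
# ResNet-50 symbol for ImageNet-shaped input via the get_symbol()
# entry point above; all argument values here are illustrative.
#
# sym = get_symbol(num_classes=1000, num_layers=50,
#                  image_shape='3,224,224', dtype='float16')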
|
|
#!/usr/bin/python
import os, sys, inspect, subprocess, tempfile
# Include the parent directory in the search path.
cmd_folder = os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0])
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from common import dbConn, DEFAULT_TAG, BACKUP_TOP, config, problems
from datetime import datetime
import itertools
import time
import calendar
import glob
# Edit distance, for approximate problem name matching.
def editDist( a, b ):
dst = [ [ 0 for x in range( 0, len( b ) + 1 ) ] for y in range( 0, len( a ) + 1 ) ]
for i in range( 0, len( a ) + 1 ):
dst[ i ][ 0 ] = i
for j in range( 0, len( b ) + 1 ):
dst[ 0 ][ j ] = j
for i in range( 1, len( a ) + 1 ):
for j in range( 1, len( b ) + 1 ):
if ( a[ i - 1 ] == b[ j - 1 ] ):
dst[ i ][ j ] = dst[ i - 1 ][ j - 1 ]
else:
dst[ i ][ j ] = dst[ i - 1 ][ j - 1 ] + 1
if dst[ i - 1 ][ j ] + 1 < dst[ i ][ j ]:
dst[ i ][ j ] = dst[ i - 1 ][ j ] + 1
if dst[ i ][ j - 1 ] + 1 < dst[ i ][ j ]:
dst[ i ][ j ] = dst[ i ][ j - 1 ] + 1
return dst[ len( a ) ][ len( b ) ]
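# A hedged illustration (not from the original source): editDist is a
# plain Levenshtein distance with unit costs, e.g.
#   editDist('kitten', 'sitting') == 3
#   editDist('tsp', 'tsp2') == 1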
def repeatedString( str, pat ):
"""Return true if str consists of cyclically repeated copies of pat
(a trailing partial copy is also accepted)."""
n = len( pat )
if len( str ) < n:
return 0
for j in range( 0, len( str ) ):
if str[ j ] != pat[ j % n ]:
return 0
return 1
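# A hedged illustration (not from the original source):
#   repeatedString('aaa', 'a')   == 1
#   repeatedString('abab', 'ab') == 1
#   repeatedString('aba', 'ab')  == 1   (trailing partial copy accepted)
#   repeatedString('ba', 'ab')   == 0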
class File:
def __init__( self, path, time ):
# Set team directory, path under that and modification time.
self.path = path
self.time = time
# File size in bytes
self.size = 0
# Number of lines in the file (once we compute it)
self.lineCount = 0
# Report of lines added and removed (from git)
self.linesChanged = 0
def __repr__(self):
return '%s' % self.path
# Representation for the value part of the file_to_problem mapping,
# i.e., one line from the file_to_problem table. This used to be a
# 4-tuple. When the lang_id was added, this struct was created to
# help with the transition to 5 fields and to make the structure
# easier to interpret.
class MappingRec:
def __init__( self, db_id, problem_id, lang_id, override, new_problem_id ):
# Remember the five database fields.
self.db_id = db_id
self.problem_id = problem_id
self.lang_id = lang_id
self.override = override
self.new_problem_id = new_problem_id
# This functionality needs to be split into an executable analyzer and
# a reusable classifier. That will help with testing.
class Analyzer:
def __init__( self, basePath ):
# path to the top of the backup directory.
self.basePath = basePath
# index of the last team in the competition.
self.lastTeam = config[ "teambackup" ][ "lastTeam" ]
# interval for updating team backups, used in this script to freshen stale modification
# times, if it looks like there's a reason.
self.backupInterval = config[ "teambackup" ][ "interval" ]
# Strings to strip from the filename before we try to guess
self.commonStrips = [ 'problem', 'prob', '_', '-' ]
# Valid source file extensions, and what each one says
# about the source language.
self.extensionMap = {
'cc': "C++",
'cpp': "C++",
'c': "C",
'java': "Java",
'py': "Python" # FIXME: do we want to discern between Python 2/3?
}
# map from problem ID to a list of keywords to look for.
self.probKeywords = {}
# List of problems. We use this mostly as the offficial
# list of problem letters, from the configuration.
self.problemList = problems
# Contest start time in Unix seconds from the database.
cursor = dbConn.cursor()
cursor.execute( "SELECT start_time FROM contests ORDER BY start_time DESC LIMIT 1" )
row = cursor.fetchone()
if ( row == None ):
print("Error: no contest found in the database.")
sys.exit(1)
self.contestStart = row[0]
# For each team, a list of team-specific strips from filenames.
self.teamStrips = {}
# we use the next two fields to hold copies of database
# information (the file_to_problem and file_modtime tables)
# while the script is running, and to update the tables once
# the script has run.
# For every team and path, this is a triple: database_id,
# latest modification time and File object (if the file has
# changed). We only add a new entry to edit_activity if
# it's sufficiently newer than what we have there or if we just
# committed and git reports that a file has changed.
self.lastEditTimes = {}
# map from team_id and path to a MappingRec instance
# containing db_id, problem_id, lang_id, override flag and new
# problem ID (if we just generated a new mapping). This lets
# us know what to ignore in the mapping and what to update
# when we re-write the database. Multiple files may map to
# the same problem, if the team is working on multiple
# versions or has some supporting files.
self.fileMappings = {}
def loadConfiguration( self ):
# Read the list of problem keywords.
self.probKeywords = {}
cursor = dbConn.cursor()
cursor.execute( "SELECT problem_id, keyword FROM problem_keywords" )
row = cursor.fetchone()
while ( row != None ):
if ( row[ 0 ] in self.probKeywords ):
self.probKeywords[ row[ 0 ] ].append( row[ 1 ].lower() )
else:
self.probKeywords[ row[ 0 ] ] = [ row[ 1 ].lower() ]
row = cursor.fetchone()
# get latest known edit times for every team/path
cursor.execute( "SELECT id, team_id, path, modify_timestamp FROM file_modtime" )
row = cursor.fetchone()
while ( row != None ):
self.lastEditTimes[ ( row[ 1 ], row[ 2 ] ) ] = [ row[ 0 ], row[ 3 ], None ]
row = cursor.fetchone()
# get existing mapping records for all mapped files.
cursor.execute( "SELECT id, team_id, path, problem_id, lang_id, override FROM file_to_problem" )
row = cursor.fetchone()
while ( row != None ):
self.fileMappings[ ( int( row[ 1 ] ), row[ 2 ] ) ] = MappingRec( row[ 0 ], row[ 3 ], row[ 4 ], row[ 5 ], None )
# Old mapping
# 0 -> id, 1 -> problem_id, 2 -> override, 3 -> new_problem_id
row = cursor.fetchone()
# load any team-specific strips.
cursor.execute( "SELECT team_id, str FROM team_strips" )
row = cursor.fetchone()
while ( row != None ):
if ( row[ 0 ] in self.teamStrips ):
self.teamStrips[ row[ 0 ] ].append( row[ 1 ].lower() )
else:
self.teamStrips[ row[ 0 ] ] = [ row[ 1 ].lower() ]
row = cursor.fetchone()
cursor.close()
def parseGitDiffs( self, bdir ):
"""Get a full report of differences between the current
revision and the previous one. Return as a map from path name
(teamxxx/path/to/file) to a tuple giving lines removed and
lines added."""
origin = os.getcwd()
os.chdir( bdir )
statFile = tempfile.TemporaryFile()
# Get the report.
subprocess.call( [ "git", "diff", "--numstat", "HEAD^", "HEAD" ], stdout=statFile )
# Each line is lines added, lines removed, path
result = {}
statFile.seek( 0 )
for line in statFile:
fields = line.rstrip().split( "\t" )
# Git still tracks binary files, but it doesn't report lines
# changed. Looks like it just reports a dash instead, but
# we ignore anything that's not an int.
try:
result[ fields[ 2 ] ] = ( int( fields[ 0 ] ), int( fields[ 1 ] ) )
except ValueError:
pass
os.chdir( origin )
return result
def countLines(self, p):
"""Given path p, count the number of lines in the file it points to."""
f = open( p )
lineCount = sum( 1 for line in f)
f.close()
return lineCount
def checkAutosaves( self, f ):
# Split into directory and file.
( dirName, fileName ) = os.path.split( f )
autoTime = None
# is it an emacs autosave file
autoFile = "%s/#%s#" % ( dirName, fileName )
if os.path.exists( autoFile ):
autoTime = os.path.getmtime( autoFile )
# is it a vim autosave file
autoFile = "%s/.%s.swp" % ( dirName, fileName )
if os.path.exists( autoFile ):
newTime = os.path.getmtime( autoFile )
if ( autoTime == None or newTime > autoTime ):
autoTime = newTime
return autoTime
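# A hedged illustration (not from the original source): for a file
# such as team1/a.c, checkAutosaves() looks for the emacs auto-save
# file team1/#a.c# and the vim swap file team1/.a.c.swp, and returns
# the freshest modification time it finds (or None).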
def stripDecoration( self, strips, str ):
last = None
while str != last:
last = str
for s in strips:
idx = str.find( s )
while idx != -1:
str = str[:idx] + str[(idx + len( s )):]
idx = str.find( s )
return str
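# A hedged illustration (not from the original source): with strips
# like ['problem', '_'], stripDecoration() repeatedly deletes every
# occurrence until a fixed point, so 'problem_a' -> '_a' -> 'a'.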
def guessProblem( self, team, path ):
"""Should return the most likely problem id for this file,
or None.
"""
# Split into directory and file.
( dirName, fileName ) = os.path.split( path )
dirName = dirName.lower()
fileName = fileName.lower()
# Build general and team-specific strips. I'm sure
# there's a better way to do this.
strips = []
for x in self.commonStrips:
strips.append( x )
if ( team in self.teamStrips ):
for x in self.teamStrips[ team ]:
strips.append( x )
baseName, extension = os.path.splitext( fileName )
extension = extension.lstrip( '.' )
if extension not in self.extensionMap:
return None
# Strip off extra words
shortName = self.stripDecoration( strips, baseName )
# Ordering here is a little bit important. We look first
# at the matches that are more confident. Then, we look
# at the ones that are less likely
# First, consider just the filename against all problem keywords.
for problem_id, keywords in self.probKeywords.iteritems():
for keyword in keywords:
# tsp.cpp -> a
if shortName == keyword:
return problem_id
# Here, we try to match against arbitrarily many occurrences of the problem
# letter. Some teams are using names like aaa.c for their third attempt.
for problem_id in self.problemList:
# a.cpp -> a or aaa.cpp -> a
if repeatedString( shortName, problem_id.lower() ):
return problem_id
# Then, start looking at the path.
if len( dirName ) > 0:
dirList = dirName.split( '/' )
for dir in dirList:
shortDirName = self.stripDecoration( strips, dir )
for problem_id, keywords in self.probKeywords.iteritems():
for keyword in keywords:
# tsp/sol.java -> a
if shortDirName == keyword:
return problem_id
for problem_id in self.problemList:
# a/code.cpp -> a or aaa/code.cpp -> a
if repeatedString( shortDirName, problem_id.lower() ):
return problem_id
# Then, take matches that occur anywhere in the problem name
for problem_id, keywords in self.probKeywords.iteritems():
for keyword in keywords:
if ( keyword in baseName ):
return problem_id
# Then, the problem letter attached to some other word with a
# non-alpha character.
for problem_id in self.problemList:
letter = problem_id.lower()
# b_2.c -> b
if ( len( baseName ) > len( letter ) and
baseName.startswith( letter ) and
not baseName[ len( letter ) ].isalpha() ):
return problem_id
# losning_b.c -> b
if ( len( baseName ) > len( letter ) and
baseName.endswith( letter ) and
not baseName[ -( len( letter ) + 1 )].isalpha() ):
return problem_id
# Then, look for path elements containing the name.
if len( dirName ) > 0:
dirList = dirName.split( '/' )
for dir in dirList:
shortDirName = self.stripDecoration( strips, dir )
for problem_id, keywords in self.probKeywords.iteritems():
for keyword in keywords:
# retry_b/sol.java -> b
if keyword in shortDirName:
return problem_id
# Then, try an approximate match against a keyword, willing to miss
# a fraction of the total characters.
for problem_id, keywords in self.probKeywords.iteritems():
for keyword in keywords:
if ( len( keyword ) > 3 and
editDist( keyword, shortName ) <= len( keyword ) * 0.25 ):
return problem_id
# Then, look for approximate matches in any directory element.
if len( dirName ) > 0:
dirList = dirName.split( '/' )
for dir in dirList:
shortDirName = self.stripDecoration( strips, dir )
for problem_id, keywords in self.probKeywords.iteritems():
for keyword in keywords:
if ( len( keyword ) > 3 and
editDist( keyword, shortDirName ) <= len( keyword ) * 0.25 ):
return problem_id
return None
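# A hedged illustration (not from the original source), assuming the
# problem_keywords table maps problem 'a' to keyword 'tsp':
#   guessProblem(team, 'tsp.cpp')   -> 'a'   (keyword match)
#   guessProblem(team, 'aaa.c')     -> 'a'   (repeated problem letter)
#   guessProblem(team, 'notes.txt') -> None  (unknown extension)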
def guessPath( self, team, path ):
"""Testing interface for problem guessing"""
self.loadConfiguration()
return self.guessProblem( team, path )
def checkActivity( self, bdir, tag ):
"""Scan the given backup dir and generate reports of the state of
files believed to correspond to various problems in the problem set."""
# Time when this script started running.
scriptStartTime = int( time.time() )
self.loadConfiguration()
# Get diff reports from git.
gitDiffs = self.parseGitDiffs( bdir )
# We should rethink some of the following loop. Right now, it
# tries to find file changes, including changes to editor auto-save
# files. But, to report changed lines in the file, we depend on git
# (which probably isn't tracking these auto-save files, so we'd have
# nothing to report)
# Visit home directory for each team.
tlist = sorted( glob.glob( bdir + '/team*' ) )
for tdir in tlist:
( dirname, tname ) = os.path.split( tdir )
team = int( tname.lstrip( 'team' ) )
cmd = "find %s/ -type f" % tdir
for f in os.popen( cmd ).readlines():
f = f.rstrip( '\n' )
fname = f[len(tdir) + 1:]
( dummy, extension ) = os.path.splitext( fname )
extension = extension.lstrip( '.' )
if extension in self.extensionMap:
fobj = File( fname, os.path.getmtime( f ) )
# Get lines changed, etc. We need to consult the git diff output.
# The tag is to make sure we only record lines changed on an analysis
# pass that's paired to a git commit. Independent analysis passes
# don't get this, since that could inflate the appearance of how
# much editing is being done.
gitPath = f[len(bdir) + 1:]
if gitPath in gitDiffs and tag != DEFAULT_TAG:
fobj.linesChanged = gitDiffs[ gitPath ][ 0 ] + gitDiffs[ gitPath ][ 1 ]
mappingRec = None
lastEditRec = None
# Files with completely implausible modification times can get ignored.
ignoreEdit = False
# see if there's a mapping for this file.
if ( team, fname ) in self.fileMappings:
mappingRec = self.fileMappings[ ( team, fname ) ]
# If there's no forced mapping for this problem, try to guess one.
if ( mappingRec == None or mappingRec.override == 0 ):
prob = self.guessProblem( team, fobj.path )
if prob != None:
if mappingRec == None:
mappingRec = MappingRec( None, None, self.extensionMap[ extension ],
0, None )
self.fileMappings[ ( team, fname ) ] = mappingRec
if mappingRec.problem_id != prob:
mappingRec.new_problem_id = prob
# see if there's an edit record for this file.
if ( team, fname ) in self.lastEditTimes:
lastEditRec = self.lastEditTimes[ ( team, fname ) ]
# check common editor auto-saves, to see if there
# is a fresher modification time.
autoTime = self.checkAutosaves( f )
if ( autoTime != None and autoTime > fobj.time ):
fobj.time = autoTime
# Try to guard against anomalous file edit times. These are unlikely to happen,
# but they could look strange in the report or even suppress tracking of files
# if they have a modification time in the future.
# No edits should happen before the start of the contest or after right now.
if fobj.time < self.contestStart:
fobj.time = self.contestStart
# If the file really changes, we definitely want to record it, possibly with
# a sane-ified modification time.
if fobj.time < scriptStartTime - 2 * self.backupInterval:
if fobj.linesChanged > 0:
fobj.time = int( scriptStartTime - self.backupInterval / 2 )
# If the file looks like it changed in the future, ignore it unless git agrees it's changing.
if fobj.time > scriptStartTime + self.backupInterval:
print "Future Modification: ", fobj.path, " changed ", fobj.linesChanged, " lines, ", (fobj.time - scriptStartTime), " seconds in the future"
if fobj.linesChanged > 0:
fobj.time = scriptStartTime
else:
ignoreEdit = True
# Is this newer than our last known edit?
# We don't just depend on git for this, since we can
# also watch auto-saves.
if not ignoreEdit and ( lastEditRec == None or lastEditRec[ 1 ] + 10 < fobj.time ):
if lastEditRec == None:
lastEditRec = [ None, None, None ]
self.lastEditTimes[ ( team, fname ) ] = lastEditRec
# Grab file size and number of lines.
fobj.size = os.path.getsize( f )
fobj.lineCount = self.countLines( f )
lastEditRec[ 2 ] = fobj
# Write out any new mappings
cursor = dbConn.cursor()
for k, v in self.fileMappings.iteritems():
if v.new_problem_id != None:
if v.db_id == None:
update = "INSERT INTO file_to_problem (team_id, path, problem_id, lang_id, override ) VALUES ( '%s', '%s', '%s', '%s', '0' )" % ( k[ 0 ], dbConn.escape_string( k[ 1 ] ), v.new_problem_id, v.lang_id )
cursor.execute( update )
else:
update = "UPDATE file_to_problem SET problem_id='%s' WHERE id='%d'" % ( v.new_problem_id, v.db_id )
cursor.execute( update )
print "( %s, %s ) -> %s" % ( k[ 0 ], k[ 1 ], v.new_problem_id )
# Write out fresh edit times to file_modtime and new records to edit_activity
cursor = dbConn.cursor()
for k, v in self.lastEditTimes.iteritems():
if v[ 2 ] != None:
t = int( v[ 2 ].time )
if v[ 0 ] == None:
update = "INSERT INTO file_modtime (team_id, path, modify_timestamp ) VALUES ( '%s', '%s', '%d' )" % ( k[ 0 ], dbConn.escape_string( k[ 1 ] ), t )
cursor.execute( update )
else:
update = "UPDATE file_modtime SET modify_timestamp='%d' WHERE id='%d'" % ( t, v[ 0 ] )
cursor.execute( update )
# Compute time since start of contest.
cmin = ( v[ 2 ].time - self.contestStart ) / 60
update = "INSERT INTO edit_activity (team_id, path, modify_timestamp, modify_time, file_size_bytes, line_count, lines_changed, git_tag ) VALUES ( '%s', '%s', '%d', '%s', '%d', '%d', '%d', '%s' )" % ( k[ 0 ], dbConn.escape_string( k[ 1 ] ), t, cmin, v[ 2 ].size, v[ 2 ].lineCount, v[ 2 ].linesChanged, tag )
cursor.execute( update )
# Create and write the summary of edit activity by problem, edit_latest
# Map from team and problem_id to a triple, database_id,
# timestamp and valid flag. the valid flag lets us delete
# database rows (say, if a file_to_problem mapping changes). An
# entry is valid as long as there is a file that's mapped to
# the given problem, even if the file no longer exists.
modLatest = {}
# get latest known edit times for every team/problem.
cursor.execute( "SELECT id, team_id, problem_id, modify_timestamp FROM edit_latest" )
row = cursor.fetchone()
while ( row != None ):
modLatest[ ( row[ 1 ], row[ 2 ] ) ] = [ row[ 0 ], row[ 3 ], 0 ]
row = cursor.fetchone()
for k, v in self.fileMappings.iteritems():
prob = v.problem_id
if v.new_problem_id != None:
prob = v.new_problem_id
if prob != None and prob != 'none':
if k in self.lastEditTimes:
lastEditRec = self.lastEditTimes[ k ]
t = lastEditRec[ 1 ]
if lastEditRec[ 2 ] != None:
t = lastEditRec[ 2 ].time
if ( k[ 0 ], prob ) in modLatest:
rec = modLatest[ ( k[ 0 ], prob ) ]
if t > rec[ 1 ]:
rec[ 1 ] = t
rec[ 2 ] = 1
else:
modLatest[ ( k[ 0 ], prob ) ] = [ None, t, 1 ]
for k, v in modLatest.iteritems():
t = int( v[ 1 ] )
if v[ 0 ] == None:
update = "INSERT INTO edit_latest (team_id, problem_id, modify_timestamp ) VALUES ( '%s', '%s', '%d' )" % ( k[ 0 ], k[ 1 ], t )
cursor.execute( update )
elif v[ 2 ]:
update = "UPDATE edit_latest SET modify_timestamp='%d' WHERE id='%d'" % ( t, v[ 0 ] )
cursor.execute( update )
else:
update = "DELETE FROM edit_latest WHERE id='%d'" % ( v[ 0 ] )
cursor.execute( update )
def reportUnclassified( self, bdir ):
"""Report all the source files that are not mapped to any problem
yet."""
self.loadConfiguration()
# Visit home directory for each team.
tlist = sorted( glob.glob( bdir + '/team*' ) )
for tdir in tlist:
( dirname, tname ) = os.path.split( tdir )
team = int( tname.lstrip( 'team' ) )
cmd = "find %s/ -type f" % tdir
for f in os.popen( cmd ).readlines():
f = f.rstrip( '\n' )
fname = f[len(tdir) + 1:]
( dummy, extension ) = os.path.splitext( fname )
extension = extension.lstrip( '.' )
if extension in self.extensionMap:
fobj = File( fname, os.path.getmtime( f ) )
prob = None
# see if there's an override for this file.
if ( team, fname ) in self.fileMappings:
mappingRec = self.fileMappings[ ( team, fname ) ]
if mappingRec.override:
prob = mappingRec.problem_id
print "%s <= %s" % ( prob, f )
# if it's not a forced mapping, try to guess and report that.
if prob == None:
# No forced problem, try to guess.
prob = self.guessProblem( team, fobj.path )
# report the file and the problem it's assigned to.
if prob == None:
print "unknown <- %s" % ( f )
else:
print "%s <- %s" % ( prob, f )
if __name__ == '__main__':
analyzer = Analyzer( BACKUP_TOP )
tag = DEFAULT_TAG
if len( sys.argv ) > 1:
tag = sys.argv[ 1 ]
analyzer.checkActivity( BACKUP_TOP, tag )
|
|
from itertools import chain
from ModestMaps.Core import Coordinate
import math
import pyproj
merc_proj = pyproj.Proj(init='epsg:3857')
latlng_proj = pyproj.Proj(proj='latlong')
def serialize_coord(coord):
return '%d/%d/%d' % (coord.zoom, coord.column, coord.row)
def deserialize_coord(coord_string):
fields = coord_string.split('/')
if len(fields) != 3:
return None
# z/x/y -> zoom/col/row
try:
zoom, col, row = map(int, fields)
except ValueError:
return None
coord = Coordinate(row=row, column=col, zoom=zoom)
return coord
def create_coord(x, y, z):
return Coordinate(row=y, column=x, zoom=z)
def parse_expired_coord_string(coord_string):
# we use the same format in the queue as the expired tile list from
# osm2pgsql
return deserialize_coord(coord_string)
def n_tiles_in_zoom(zoom):
assert zoom >= 0
n = 0
for i in xrange(zoom + 1):
n += math.pow(4, i)
return int(n)
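# A hedged note (not from the original source): the loop above sums a
# geometric series, so n_tiles_in_zoom(z) == (4 ** (z + 1) - 1) // 3;
# e.g. n_tiles_in_zoom(2) == 1 + 4 + 16 == 21.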
def seed_tiles(zoom_start=0, zoom_until=10):
for zoom in xrange(zoom_start, zoom_until + 1):
limit = int(math.pow(2, zoom))
for col in xrange(limit):
for row in xrange(limit):
yield Coordinate(zoom=zoom, column=col, row=row)
# http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
def num2deg(xtile, ytile, zoom):
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
# http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int(
(1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) /
math.pi) / 2.0 * n)
return (xtile, ytile)
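# A hedged illustration (not from the original source): at zoom 1 the
# lat/lng origin falls into tile (1, 1), and tile (0, 0) has its
# top-left corner at the top of the Web Mercator world:
#   deg2num(0.0, 0.0, 1)  == (1, 1)
#   num2deg(0, 0, 1)      ~= (85.0511, -180.0)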
def coord_to_bounds(coord):
topleft_lat, topleft_lng = num2deg(coord.column, coord.row, coord.zoom)
bottomright_lat, bottomright_lng = num2deg(
coord.column + 1, coord.row + 1, coord.zoom)
minx = topleft_lng
miny = bottomright_lat
maxx = bottomright_lng
maxy = topleft_lat
# coord_to_bounds is used to calculate boxes that could be off the grid
# clamp the max values in that scenario
maxx = min(180, maxx)
maxy = min(90, maxy)
bounds = (minx, miny, maxx, maxy)
return bounds
def reproject_lnglat_to_mercator(x, y, *unused_coords):
return pyproj.transform(latlng_proj, merc_proj, x, y)
def reproject_mercator_to_lnglat(x, y, *unused_coords):
return pyproj.transform(merc_proj, latlng_proj, x, y)
# mercator <-> point conversions ported from tilestache
earth_radius = 6378137
earth_circum = 2 * math.pi * earth_radius
coord_mercator_point_zoom = math.log(earth_circum) / math.log(2)
half_earth_circum = earth_circum / 2
def mercator_point_to_coord_fractional(z, x, y):
coord = Coordinate(
column=x + half_earth_circum,
row=half_earth_circum - y,
zoom=coord_mercator_point_zoom,
)
coord = coord.zoomTo(z)
return coord
def mercator_point_to_coord(z, x, y):
coord = mercator_point_to_coord_fractional(z, x, y).container()
return coord
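# A hedged illustration (not from the original source): the mercator
# origin (0, 0) sits at the centre of the single zoom 0 tile, so
# mercator_point_to_coord(0, 0.0, 0.0) is the (0, 0, 0) coordinate, and
# the fractional variant returns column == row == 0.5 before container().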
def coord_to_mercator_point(coord):
coord = coord.zoomTo(coord_mercator_point_zoom)
x = coord.column - half_earth_circum
y = half_earth_circum - coord.row
return x, y
def coord_to_mercator_bounds(coord):
ul_x, ul_y = coord_to_mercator_point(coord)
lr_x, lr_y = coord_to_mercator_point(coord.down().right())
minx = min(ul_x, lr_x)
miny = min(ul_y, lr_y)
maxx = max(ul_x, lr_x)
maxy = max(ul_y, lr_y)
return minx, miny, maxx, maxy
def bounds_to_coords(bounds, zoom):
minx, miny, maxx, maxy = bounds
topleft_lng = minx
topleft_lat = maxy
bottomright_lat = miny
bottomright_lng = maxx
topleftx, toplefty = deg2num(topleft_lat, topleft_lng, zoom)
bottomrightx, bottomrighty = deg2num(
bottomright_lat, bottomright_lng, zoom)
# clamp max values
maxval = int(math.pow(2, zoom) - 1)
bottomrightx = min(maxval, bottomrightx)
bottomrighty = min(maxval, bottomrighty)
topleftcoord = Coordinate(row=toplefty, column=topleftx, zoom=zoom)
# check if one coordinate subsumes the whole bounds at this zoom
if topleftx == bottomrightx and toplefty == bottomrighty:
return [topleftcoord]
# we have two inclusive coordinates representing the range
bottomrightcoord = Coordinate(
row=bottomrighty, column=bottomrightx, zoom=zoom)
return topleftcoord, bottomrightcoord
def tile_generator_for_single_bounds(bounds, zoom_start, zoom_until):
for zoom in xrange(zoom_start, zoom_until + 1):
coords = bounds_to_coords(bounds, zoom)
assert len(coords) in (1, 2)
if len(coords) == 1:
coord = coords[0]
start_col = coord.column
start_row = coord.row
end_col = start_col
end_row = start_row
else:
topleftcoord, bottomrightcoord = coords
start_col = topleftcoord.column
start_row = topleftcoord.row
end_col = bottomrightcoord.column
end_row = bottomrightcoord.row
for tile in tile_generator_for_range(
start_col, start_row,
end_col, end_row,
zoom, zoom):
yield tile
def tile_generator_for_range(
start_col, start_row,
end_col, end_row,
zoom_start, zoom_until):
zoom_multiplier = 1
# all the "end" parameters are inclusive
# bump them all up here to make them exclusive for range
end_col += 1
end_row += 1
zoom_until += 1
for zoom in xrange(zoom_start, zoom_until):
for col in xrange(start_col * zoom_multiplier,
end_col * zoom_multiplier):
for row in xrange(start_row * zoom_multiplier,
end_row * zoom_multiplier):
yield Coordinate(row=row, column=col, zoom=zoom)
zoom_multiplier *= 2
def tile_generator_for_multiple_bounds(bounds, zoom_start, zoom_until):
return chain.from_iterable(
tile_generator_for_single_bounds(bounds, zoom_start, zoom_until)
for bounds in bounds)
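# Example use of the generators above (a sketch; the bounding box below is an
# arbitrary lng/lat box, not part of the original code): enumerate every tile
# coordinate covering the box from zoom 10 through zoom 12 inclusive.
#
#   bbox = (-122.4, 47.5, -122.2, 47.7)
#   for coord in tile_generator_for_single_bounds(bbox, 10, 12):
#       print(coord.zoom, coord.column, coord.row)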
# The tiles will get encoded into integers suitable for redis to store. When
# redis is given integers, it is able to store them efficiently. Note that the
# integers are sent over to redis as a string. Another format was tried which
# packed the data into 6 bytes and then sent those 6 bytes as a string, but
# that actually took more memory in redis, presumably because raw integers can
# be stored more efficiently.
# This is how the data is encoded into a 64 bit integer:
# 1 bit unused | 29 bits column | 29 bits row | 5 bits zoom
zoom_bits = 5
row_bits = 29
col_bits = 29
zoom_mask = int('1' * zoom_bits, 2)
row_mask = int(('1' * row_bits), 2)
col_mask = row_mask
row_offset = zoom_bits
col_offset = zoom_bits + row_bits
# some additional masks to help with efficient zoom up operations
all_but_zoom_mask = int('1' * 64, 2) << zoom_bits
high_row_mask = int(('1' * (1 + col_bits)) +
'0' +
('1' * (row_bits - 1 + zoom_bits)), 2)
def coord_marshall_int(coord):
zoom = int(coord.zoom)
column = int(coord.column)
row = int(coord.row)
val = zoom | (row << row_offset) | (column << col_offset)
return val
def coord_unmarshall_int(coord_int):
if isinstance(coord_int, (str, unicode)):
coord_int = int(coord_int)
zoom = zoom_mask & coord_int
row = row_mask & (coord_int >> row_offset)
column = col_mask & (coord_int >> col_offset)
return Coordinate(column=column, row=row, zoom=zoom)
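# Round-trip sketch for the encoding above (values chosen arbitrarily): a
# coordinate marshalled into its 64-bit integer form and unmarshalled again
# yields the same zoom/column/row.
#
#   c = Coordinate(column=5, row=3, zoom=4)
#   i = coord_marshall_int(c)       # 4 | (3 << 5) | (5 << 34)
#   coord_unmarshall_int(i)         # -> Coordinate(column=5, row=3, zoom=4)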
# perform an efficient zoom up operation via the integer directly
def coord_int_zoom_up(coord_int):
# First we'll update the row/col values both simultaneously by
# shifting all bits to the right in an attempt to divide both by
# 2. This is *almost* correct; we just need to account for the
# fact that the lowest bit of the column value can "leak" into the
# high bit of the row, which we do by zero'ing out just that bit
# via the high_row_mask.
coord_int_shifted = (coord_int >> 1) & high_row_mask
zoom = zoom_mask & coord_int
# Given that the row/col bits are now set correctly, all that
# remains is to update the zoom bits. This is done by applying a
# mask to zero out all the zoom bits, and then or'ing the new
# parent zoom bits into place
parent_coord_int = (coord_int_shifted & all_but_zoom_mask) | (zoom - 1)
return parent_coord_int
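# Worked example for the bit trick above (values chosen arbitrarily): the
# parent of tile (column=5, row=3) at zoom 4 is (column=2, row=1) at zoom 3,
# i.e. both column and row are halved (integer division) and the zoom drops
# by one.
#
#   child_int = coord_marshall_int(Coordinate(column=5, row=3, zoom=4))
#   parent = coord_unmarshall_int(coord_int_zoom_up(child_int))
#   # parent.column == 2, parent.row == 1, parent.zoom == 3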
def coord_children(coord):
first_child = coord.zoomBy(1)
return (
first_child,
first_child.down(),
first_child.right(),
first_child.right().down())
def coord_children_range(coord, zoom_until):
assert zoom_until > coord.zoom, 'zoom_until (%r) must be > coord.zoom ' \
'(%r)' % (zoom_until, coord)
for child in coord_children_subrange(coord, coord.zoom + 1, zoom_until):
yield child
def coord_children_subrange(coord, zoom_start, zoom_until):
assert zoom_start >= coord.zoom
assert zoom_until >= coord.zoom
children_to_process = [coord]
if zoom_start <= coord.zoom:
yield coord
cur_zoom = coord.zoom
while cur_zoom < zoom_until:
next_children = []
cur_zoom += 1
for child_to_process in children_to_process:
children = coord_children(child_to_process)
for child in children:
if zoom_start <= cur_zoom:
yield child
next_children.append(child)
children_to_process = next_children
tolerances = [6378137 * 2 * math.pi / (2 ** (zoom + 8)) for zoom in range(22)]
def tolerance_for_zoom(zoom):
tol_idx = zoom if 0 <= zoom < len(tolerances) else -1
tolerance = tolerances[tol_idx]
return tolerance
def bounds_buffer(bounds, buf_size):
return (
bounds[0] - buf_size, bounds[1] - buf_size,
bounds[2] + buf_size, bounds[3] + buf_size,
)
# radius from http://wiki.openstreetmap.org/wiki/Zoom_levels
earth_equatorial_radius_meters = 6372798.2
earth_equatorial_circumference_meters = 40041472.01586051
def calc_meters_per_pixel_dim(zoom):
meters_in_dimension = (earth_equatorial_circumference_meters /
(2 ** (zoom + 8)))
return meters_in_dimension
def calc_meters_per_pixel_area(zoom):
meters_per_pixel_dim = calc_meters_per_pixel_dim(zoom)
meters_per_pixel_area = meters_per_pixel_dim * meters_per_pixel_dim
return meters_per_pixel_area
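# Rough sanity check of the formula above: at zoom 0 the whole equatorial
# circumference maps onto a single 256px tile, so one pixel spans about
# 40041472.0 / 256 ~= 156412 meters; each additional zoom level halves that.
#
#   calc_meters_per_pixel_dim(0)   # -> ~156412.0
#   calc_meters_per_pixel_dim(1)   # -> ~78206.0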
_geom_type_lookup = {
'Point': 'point',
'MultiPoint': 'point',
'LineString': 'line',
'MultiLineString': 'line',
'Polygon': 'polygon',
'MultiPolygon': 'polygon',
}
def normalize_geometry_type(geom_type):
result = _geom_type_lookup.get(geom_type)
assert result, \
'normalize_geometry_type: unknown geometry %s' % geom_type
return result
def coord_is_valid(coord, max_zoom=20):
if coord.zoom < 0 or coord.zoom > max_zoom:
return False
if coord.column < 0 or coord.row < 0:
return False
max_colrow = int(math.pow(2, coord.zoom))
if coord.column >= max_colrow or coord.row >= max_colrow:
return False
return True
def metatile_zoom_from_size(metatile_size):
metatile_zoom = 0
if metatile_size is not None:
metatile_zoom = int(math.log(metatile_size, 2))
assert (1 << metatile_zoom) == metatile_size, \
"Metatile size must be a power of two."
return metatile_zoom
def metatile_zoom_from_str(tile_size):
if not tile_size:
# missing tile size indicates the default, which is currently 256px.
# this is a zoom offset of 0.
return 0
    # calculate how many standard 256px tiles wide this tile is.
size = int(tile_size) / 256
# and convert that to the zoom level offset that would create a metatile
# of that size.
return metatile_zoom_from_size(size)
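# Example (a sketch): a 512px metatile is two standard 256px tiles across,
# which corresponds to a zoom offset of 1; a missing size means 256px, i.e.
# an offset of 0.
#
#   metatile_zoom_from_str('512')   # -> 1
#   metatile_zoom_from_str('')      # -> 0
#   metatile_zoom_from_size(8)      # -> 3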
|
|
from __future__ import absolute_import
try:
from itertools import zip_longest as izip_longest, repeat # pylint: disable=E0611
except ImportError:
from itertools import izip_longest as izip_longest, repeat # pylint: disable=E0611
import logging
import sys
import time
import six
from six.moves import queue
from .base import (
Consumer,
FETCH_DEFAULT_BLOCK_TIMEOUT,
AUTO_COMMIT_MSG_COUNT,
AUTO_COMMIT_INTERVAL,
FETCH_MIN_BYTES,
FETCH_BUFFER_SIZE_BYTES,
MAX_FETCH_BUFFER_SIZE_BYTES,
FETCH_MAX_WAIT_TIME,
ITER_TIMEOUT_SECONDS,
NO_MESSAGES_WAIT_TIME_SECONDS
)
from ..common import (
FetchRequestPayload, KafkaError, OffsetRequestPayload,
ConsumerFetchSizeTooSmall, ConsumerNoMoreData,
UnknownTopicOrPartitionError, NotLeaderForPartitionError,
OffsetOutOfRangeError, FailedPayloadsError, check_error
)
from kafka.protocol.message import PartialMessage
log = logging.getLogger(__name__)
class FetchContext(object):
"""
Class for managing the state of a consumer during fetch
"""
def __init__(self, consumer, block, timeout):
self.consumer = consumer
self.block = block
if block:
if not timeout:
timeout = FETCH_DEFAULT_BLOCK_TIMEOUT
self.timeout = timeout * 1000
def __enter__(self):
"""Set fetch values based on blocking status"""
self.orig_fetch_max_wait_time = self.consumer.fetch_max_wait_time
self.orig_fetch_min_bytes = self.consumer.fetch_min_bytes
if self.block:
self.consumer.fetch_max_wait_time = self.timeout
self.consumer.fetch_min_bytes = 1
else:
self.consumer.fetch_min_bytes = 0
def __exit__(self, type, value, traceback):
"""Reset values"""
self.consumer.fetch_max_wait_time = self.orig_fetch_max_wait_time
self.consumer.fetch_min_bytes = self.orig_fetch_min_bytes
class SimpleConsumer(Consumer):
"""
A simple consumer implementation that consumes all/specified partitions
for a topic
Arguments:
client: a connected SimpleClient
group: a name for this consumer, used for offset storage and must be unique
If you are connecting to a server that does not support offset
commit/fetch (any prior to 0.8.1.1), then you *must* set this to None
topic: the topic to consume
Keyword Arguments:
partitions: An optional list of partitions to consume the data from
auto_commit: default True. Whether or not to auto commit the offsets
auto_commit_every_n: default 100. How many messages to consume
before a commit
auto_commit_every_t: default 5000. How much time (in milliseconds) to
wait before commit
fetch_size_bytes: number of bytes to request in a FetchRequest
buffer_size: default 4K. Initial number of bytes to tell kafka we
have available. This will double as needed.
max_buffer_size: default 16K. Max number of bytes to tell kafka we have
available. None means no limit.
iter_timeout: default None. How much time (in seconds) to wait for a
message in the iterator before exiting. None means no
timeout, so it will wait forever.
auto_offset_reset: default largest. Reset partition offsets upon
OffsetOutOfRangeError. Valid values are largest and smallest.
Otherwise, do not reset the offsets and raise OffsetOutOfRangeError.
Auto commit details:
If both auto_commit_every_n and auto_commit_every_t are set, they will
reset one another when one is triggered. These triggers simply call the
commit method on this class. A manual call to commit will also reset
these triggers
"""
def __init__(self, client, group, topic, auto_commit=True, partitions=None,
auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
auto_commit_every_t=AUTO_COMMIT_INTERVAL,
fetch_size_bytes=FETCH_MIN_BYTES,
buffer_size=FETCH_BUFFER_SIZE_BYTES,
max_buffer_size=MAX_FETCH_BUFFER_SIZE_BYTES,
iter_timeout=None,
auto_offset_reset='largest'):
super(SimpleConsumer, self).__init__(
client, group, topic,
partitions=partitions,
auto_commit=auto_commit,
auto_commit_every_n=auto_commit_every_n,
auto_commit_every_t=auto_commit_every_t)
if max_buffer_size is not None and buffer_size > max_buffer_size:
raise ValueError('buffer_size (%d) is greater than '
'max_buffer_size (%d)' %
(buffer_size, max_buffer_size))
self.buffer_size = buffer_size
self.max_buffer_size = max_buffer_size
self.fetch_max_wait_time = FETCH_MAX_WAIT_TIME
self.fetch_min_bytes = fetch_size_bytes
self.fetch_offsets = self.offsets.copy()
self.iter_timeout = iter_timeout
self.auto_offset_reset = auto_offset_reset
self.queue = queue.Queue()
def __repr__(self):
return '<SimpleConsumer group=%s, topic=%s, partitions=%s>' % \
(self.group, self.topic, str(self.offsets.keys()))
def reset_partition_offset(self, partition):
"""Update offsets using auto_offset_reset policy (smallest|largest)
Arguments:
partition (int): the partition for which offsets should be updated
Returns: Updated offset on success, None on failure
"""
LATEST = -1
EARLIEST = -2
if self.auto_offset_reset == 'largest':
reqs = [OffsetRequestPayload(self.topic, partition, LATEST, 1)]
elif self.auto_offset_reset == 'smallest':
reqs = [OffsetRequestPayload(self.topic, partition, EARLIEST, 1)]
else:
            # Let's raise a reasonable exception type if the user calls
            # this outside of an exception context
if sys.exc_info() == (None, None, None):
raise OffsetOutOfRangeError('Cannot reset partition offsets without a '
'valid auto_offset_reset setting '
'(largest|smallest)')
# Otherwise we should re-raise the upstream exception
# b/c it typically includes additional data about
# the request that triggered it, and we do not want to drop that
raise # pylint: disable=E0704
# send_offset_request
log.info('Resetting topic-partition offset to %s for %s:%d',
self.auto_offset_reset, self.topic, partition)
try:
(resp, ) = self.client.send_offset_request(reqs)
except KafkaError as e:
log.error('%s sending offset request for %s:%d',
e.__class__.__name__, self.topic, partition)
else:
self.offsets[partition] = resp.offsets[0]
self.fetch_offsets[partition] = resp.offsets[0]
return resp.offsets[0]
def seek(self, offset, whence=None, partition=None):
"""
Alter the current offset in the consumer, similar to fseek
Arguments:
offset: how much to modify the offset
whence: where to modify it from, default is None
* None is an absolute offset
* 0 is relative to the earliest available offset (head)
* 1 is relative to the current offset
* 2 is relative to the latest known offset (tail)
partition: modify which partition, default is None.
If partition is None, would modify all partitions.
"""
if whence is None: # set an absolute offset
if partition is None:
for tmp_partition in self.offsets:
self.offsets[tmp_partition] = offset
else:
self.offsets[partition] = offset
elif whence == 1: # relative to current position
if partition is None:
for tmp_partition, _offset in self.offsets.items():
self.offsets[tmp_partition] = _offset + offset
else:
self.offsets[partition] += offset
elif whence in (0, 2): # relative to beginning or end
reqs = []
deltas = {}
if partition is None:
                # divide the request offset by the number of partitions,
                # distribute the remainder evenly
(delta, rem) = divmod(offset, len(self.offsets))
for tmp_partition, r in izip_longest(self.offsets.keys(),
repeat(1, rem),
fillvalue=0):
deltas[tmp_partition] = delta + r
for tmp_partition in self.offsets.keys():
if whence == 0:
reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -2, 1))
elif whence == 2:
reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -1, 1))
else:
pass
else:
deltas[partition] = offset
if whence == 0:
reqs.append(OffsetRequestPayload(self.topic, partition, -2, 1))
elif whence == 2:
reqs.append(OffsetRequestPayload(self.topic, partition, -1, 1))
else:
pass
resps = self.client.send_offset_request(reqs)
for resp in resps:
self.offsets[resp.partition] = \
resp.offsets[0] + deltas[resp.partition]
else:
raise ValueError('Unexpected value for `whence`, %d' % whence)
# Reset queue and fetch offsets since they are invalid
self.fetch_offsets = self.offsets.copy()
self.count_since_commit += 1
if self.auto_commit:
self.commit()
self.queue = queue.Queue()
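    # Illustrative seek() calls (a sketch; the offsets and partition numbers
    # below are placeholders):
    #
    #   consumer.seek(0, 2)      # jump to the tail of every partition
    #   consumer.seek(-5, 2)     # back up 5 messages in total from the tail
    #   consumer.seek(10, 0, 1)  # 10 messages past the head of partition 1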
def get_messages(self, count=1, block=True, timeout=0.1):
"""
Fetch the specified number of messages
Keyword Arguments:
count: Indicates the maximum number of messages to be fetched
block: If True, the API will block till all messages are fetched.
If block is a positive integer the API will block until that
many messages are fetched.
timeout: When blocking is requested the function will block for
                the specified time (in seconds) until count messages are
fetched. If None, it will block forever.
"""
messages = []
if timeout is not None:
timeout += time.time()
new_offsets = {}
log.debug('getting %d messages', count)
while len(messages) < count:
block_time = timeout - time.time()
log.debug('calling _get_message block=%s timeout=%s', block, block_time)
block_next_call = block is True or block > len(messages)
result = self._get_message(block_next_call, block_time,
get_partition_info=True,
update_offset=False)
log.debug('got %s from _get_messages', result)
if not result:
if block_next_call and (timeout is None or time.time() <= timeout):
continue
break
partition, message = result
_msg = (partition, message) if self.partition_info else message
messages.append(_msg)
new_offsets[partition] = message.offset + 1
# Update and commit offsets if necessary
self.offsets.update(new_offsets)
self.count_since_commit += len(messages)
self._auto_commit()
log.debug('got %d messages: %s', len(messages), messages)
return messages
def get_message(self, block=True, timeout=0.1, get_partition_info=None):
return self._get_message(block, timeout, get_partition_info)
def _get_message(self, block=True, timeout=0.1, get_partition_info=None,
update_offset=True):
"""
If no messages can be fetched, returns None.
If get_partition_info is None, it defaults to self.partition_info
If get_partition_info is True, returns (partition, message)
If get_partition_info is False, returns message
"""
start_at = time.time()
while self.queue.empty():
# We're out of messages, go grab some more.
log.debug('internal queue empty, fetching more messages')
with FetchContext(self, block, timeout):
self._fetch()
if not block or time.time() > (start_at + timeout):
break
try:
partition, message = self.queue.get_nowait()
if update_offset:
# Update partition offset
self.offsets[partition] = message.offset + 1
# Count, check and commit messages if necessary
self.count_since_commit += 1
self._auto_commit()
if get_partition_info is None:
get_partition_info = self.partition_info
if get_partition_info:
return partition, message
else:
return message
except queue.Empty:
log.debug('internal queue empty after fetch - returning None')
return None
def __iter__(self):
if self.iter_timeout is None:
timeout = ITER_TIMEOUT_SECONDS
else:
timeout = self.iter_timeout
while True:
message = self.get_message(True, timeout)
if message:
yield message
elif self.iter_timeout is None:
# We did not receive any message yet but we don't have a
# timeout, so give up the CPU for a while before trying again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
else:
# Timed out waiting for a message
break
def _fetch(self):
# Create fetch request payloads for all the partitions
partitions = dict((p, self.buffer_size)
for p in self.fetch_offsets.keys())
while partitions:
requests = []
for partition, buffer_size in six.iteritems(partitions):
requests.append(FetchRequestPayload(self.topic, partition,
self.fetch_offsets[partition],
buffer_size))
# Send request
responses = self.client.send_fetch_request(
requests,
max_wait_time=int(self.fetch_max_wait_time),
min_bytes=self.fetch_min_bytes,
fail_on_error=False
)
retry_partitions = {}
for resp in responses:
try:
check_error(resp)
except UnknownTopicOrPartitionError:
log.error('UnknownTopicOrPartitionError for %s:%d',
resp.topic, resp.partition)
self.client.reset_topic_metadata(resp.topic)
raise
except NotLeaderForPartitionError:
log.error('NotLeaderForPartitionError for %s:%d',
resp.topic, resp.partition)
self.client.reset_topic_metadata(resp.topic)
continue
except OffsetOutOfRangeError:
log.warning('OffsetOutOfRangeError for %s:%d. '
'Resetting partition offset...',
resp.topic, resp.partition)
self.reset_partition_offset(resp.partition)
# Retry this partition
retry_partitions[resp.partition] = partitions[resp.partition]
continue
except FailedPayloadsError as e:
log.warning('FailedPayloadsError for %s:%d',
e.payload.topic, e.payload.partition)
# Retry this partition
retry_partitions[e.payload.partition] = partitions[e.payload.partition]
continue
partition = resp.partition
buffer_size = partitions[partition]
# Check for partial message
if resp.messages and isinstance(resp.messages[-1].message, PartialMessage):
# If buffer is at max and all we got was a partial message
# raise ConsumerFetchSizeTooSmall
if (self.max_buffer_size is not None and
buffer_size == self.max_buffer_size and
len(resp.messages) == 1):
log.error('Max fetch size %d too small', self.max_buffer_size)
raise ConsumerFetchSizeTooSmall()
if self.max_buffer_size is None:
buffer_size *= 2
else:
buffer_size = min(buffer_size * 2, self.max_buffer_size)
log.warning('Fetch size too small, increase to %d (2x) '
'and retry', buffer_size)
retry_partitions[partition] = buffer_size
resp.messages.pop()
for message in resp.messages:
if message.offset < self.fetch_offsets[partition]:
log.debug('Skipping message %s because its offset is less than the consumer offset',
message)
continue
# Put the message in our queue
self.queue.put((partition, message))
self.fetch_offsets[partition] = message.offset + 1
partitions = retry_partitions
|
|
from __future__ import division
from datetime import datetime, date
import json
import time
from flask import request
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types, desc
from sqlalchemy.orm import backref
from sqlalchemy import event, DDL
from dateutil.tz import tzoffset
from flask.ext.sqlalchemy import SQLAlchemy
from utils import raw_name, safe_name, convert_datetime_to_iso_8601
db = SQLAlchemy()
# -------------------
# Initialization logic
# -------------------
def initialize_database(app):
""" Takes an initalized flask application and binds a database context to allow query execution
"""
# see https://github.com/mitsuhiko/flask-sqlalchemy/issues/82
db.app = app
db.init_app(app)
return db
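# Minimal wiring sketch (assumes a Flask app configured with a
# SQLALCHEMY_DATABASE_URI; the URI below is a placeholder):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/civic'
#   db = initialize_database(app)
#   db.create_all()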
# -------------------
# Types
# -------------------
class JsonType(Mutable, types.TypeDecorator):
""" JSON wrapper type for TEXT database storage.
References:
http://stackoverflow.com/questions/4038314/sqlalchemy-json-as-blob-text
http://docs.sqlalchemy.org/en/rel_0_9/orm/extensions/mutable.html
"""
impl = types.Unicode
def process_bind_param(self, value, engine):
return unicode(json.dumps(value))
def process_result_value(self, value, engine):
if value:
return json.loads(value)
else:
# default can also be a list
return {}
class TSVectorType(types.TypeDecorator):
''' TSVECTOR wrapper type for database storage.
References:
http://stackoverflow.com/questions/13837111/tsvector-in-sqlalchemy
'''
impl = types.UnicodeText
@compiles(TSVectorType, 'postgresql')
def compile_tsvector(element, compiler, **kw):
return 'tsvector'
# -------------------
# Models
# -------------------
class Organization(db.Model):
'''
Brigades and other civic tech organizations
'''
# Columns
name = db.Column(db.Unicode(), primary_key=True)
website = db.Column(db.Unicode())
events_url = db.Column(db.Unicode())
rss = db.Column(db.Unicode())
projects_list_url = db.Column(db.Unicode())
type = db.Column(db.Unicode())
city = db.Column(db.Unicode())
latitude = db.Column(db.Float())
longitude = db.Column(db.Float())
last_updated = db.Column(db.Integer())
started_on = db.Column(db.Unicode())
member_count = db.Column(db.Integer())
keep = db.Column(db.Boolean())
tsv_body = db.Column(TSVectorType())
id = db.Column(db.Unicode())
# Relationships
# can contain events, stories, projects (these relationships are defined in the child objects)
def __init__(self, name, website=None, events_url=None, members_count=None,
rss=None, projects_list_url=None, type=None, city=None, latitude=None, longitude=None, last_updated=time.time()):
self.name = name
self.website = website
self.events_url = events_url
self.rss = rss
self.projects_list_url = projects_list_url
self.type = type
self.city = city
self.latitude = latitude
self.longitude = longitude
self.keep = True
self.last_updated = last_updated
self.started_on = unicode(date.today())
self.id = safe_name(raw_name(name))
self.members_count = members_count
def current_events(self):
'''
Return the two soonest upcoming events
'''
filter_old = Event.start_time_notz >= datetime.utcnow()
current_events = Event.query.filter_by(organization_name=self.name)\
.filter(filter_old).order_by(Event.start_time_notz.asc()).limit(2).all()
current_events_json = [row.asdict() for row in current_events]
return current_events_json
def current_projects(self):
'''
        Return the three most recently updated projects
'''
current_projects = Project.query.filter_by(organization_name=self.name).order_by(desc(Project.last_updated)).limit(3)
current_projects_json = [project.asdict(include_issues=False) for project in current_projects]
return current_projects_json
def current_stories(self):
'''
        Return the two most recent stories
'''
current_stories = Story.query.filter_by(organization_name=self.name).order_by(desc(Story.id)).limit(2).all()
current_stories_json = [row.asdict() for row in current_stories]
return current_stories_json
def all_events(self):
        ''' API link to all of an org's events
'''
# Make a nice org name
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/events' % (request.scheme, request.host, organization_name)
def upcoming_events(self):
        ''' API link to an org's upcoming events
'''
# Make a nice org name
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/upcoming_events' % (request.scheme, request.host, organization_name)
def past_events(self):
        ''' API link to an org's past events
'''
# Make a nice org name
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/past_events' % (request.scheme, request.host, organization_name)
def all_projects(self):
        ''' API link to all of an org's projects
'''
# Make a nice org name
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/projects' % (request.scheme, request.host, organization_name)
def all_issues(self):
        '''API link to all of an org's issues
'''
# Make a nice org name
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/issues' % (request.scheme, request.host, organization_name)
def all_stories(self):
        ''' API link to all of an org's stories
'''
# Make a nice org name
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/stories' % (request.scheme, request.host, organization_name)
def all_attendance(self):
        ''' API link to an org's attendance '''
organization_name = safe_name(self.name)
return '%s://%s/api/organizations/%s/attendance' % (request.scheme, request.host, organization_name)
def api_id(self):
''' Return organization name made safe for use in a URL.
'''
return safe_name(self.name)
def api_url(self):
''' API link to itself
'''
return '%s://%s/api/organizations/%s' % (request.scheme, request.host, self.api_id())
def asdict(self, include_extras=False):
''' Return Organization as a dictionary, with some properties tweaked.
Optionally include linked projects, events, and stories.
'''
organization_dict = db.Model.asdict(self)
# remove fields that don't need to be public
del organization_dict['keep']
del organization_dict['tsv_body']
for key in ('all_events', 'all_projects', 'all_stories', 'all_issues',
'upcoming_events', 'past_events', 'api_url', 'all_attendance'):
organization_dict[key] = getattr(self, key)()
if include_extras:
for key in ('current_events', 'current_projects', 'current_stories'):
organization_dict[key] = getattr(self, key)()
return organization_dict
tbl = Organization.__table__
# Index the tsvector column
db.Index('index_org_tsv_body', tbl.c.tsv_body, postgresql_using='gin')
# Trigger to populate the search index column
trig_ddl = DDL("""
CREATE TRIGGER tsvupdate_orgs_trigger BEFORE INSERT OR UPDATE ON organization FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(tsv_body, 'pg_catalog.english', name);
""")
# Initialize the trigger after table is created
event.listen(tbl, 'after_create', trig_ddl.execute_if(dialect='postgresql'))
class Story(db.Model):
'''
Blog posts from a Brigade.
'''
# Columns
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.Unicode())
link = db.Column(db.Unicode())
type = db.Column(db.Unicode())
keep = db.Column(db.Boolean())
# Relationships
# child
organization = db.relationship('Organization', single_parent=True, cascade='all, delete-orphan', backref=backref("stories", cascade="save-update, delete"))
organization_name = db.Column(db.Unicode(), db.ForeignKey('organization.name', ondelete='CASCADE'), nullable=False)
def __init__(self, title=None, link=None, type=None, organization_name=None):
self.title = title
self.link = link
self.type = type
self.organization_name = organization_name
self.keep = True
def api_url(self):
''' API link to itself
'''
return '%s://%s/api/stories/%s' % (request.scheme, request.host, str(self.id))
def asdict(self, include_organization=False):
''' Return Story as a dictionary, with some properties tweaked.
Optionally include linked organization.
'''
story_dict = db.Model.asdict(self)
# remove fields that don't need to be public
del story_dict['keep']
story_dict['api_url'] = self.api_url()
if include_organization:
story_dict['organization'] = self.organization.asdict()
return story_dict
class Project(db.Model):
'''
Civic tech projects on GitHub
'''
# Columns
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.Unicode())
code_url = db.Column(db.Unicode())
link_url = db.Column(db.Unicode())
description = db.Column(db.Unicode())
type = db.Column(db.Unicode())
categories = db.Column(db.Unicode())
tags = db.Column(db.Unicode())
github_details = db.Column(JsonType())
last_updated = db.Column(db.DateTime())
last_updated_issues = db.Column(db.Unicode())
last_updated_civic_json = db.Column(db.Unicode())
last_updated_root_files = db.Column(db.Unicode())
keep = db.Column(db.Boolean())
tsv_body = db.Column(TSVectorType())
status = db.Column(db.Unicode())
languages = db.Column(JsonType())
# Relationships
# child
organization = db.relationship('Organization', single_parent=True, cascade='all, delete-orphan', backref=backref("projects", cascade="save-update, delete"))
organization_name = db.Column(db.Unicode(), db.ForeignKey('organization.name', ondelete='CASCADE'), nullable=False)
# can contain issues (this relationship is defined in the child object)
def __init__(self, name, code_url=None, link_url=None,
description=None, type=None, categories=None, tags=None,
github_details=None, last_updated=None, last_updated_issues=None,
last_updated_civic_json=None, last_updated_root_files=None, organization_name=None,
keep=None, status=None, languages=None):
self.name = name
self.code_url = code_url
self.link_url = link_url
self.description = description
self.type = type
self.categories = categories
self.tags = tags
self.github_details = github_details
self.last_updated = last_updated
self.last_updated_issues = last_updated_issues
self.last_updated_civic_json = last_updated_civic_json
self.last_updated_root_files = last_updated_root_files
self.organization_name = organization_name
self.keep = True
self.status = status
self.languages = languages
def api_url(self):
''' API link to itself
'''
return '%s://%s/api/projects/%s' % (request.scheme, request.host, str(self.id))
def asdict(self, include_organization=False, include_issues=True):
''' Return Project as a dictionary, with some properties tweaked.
Optionally include linked organization.
'''
project_dict = db.Model.asdict(self)
# remove fields that don't need to be public
del project_dict['keep']
del project_dict['tsv_body']
del project_dict['last_updated_issues']
del project_dict['last_updated_civic_json']
del project_dict['last_updated_root_files']
project_dict['api_url'] = self.api_url()
if include_organization:
project_dict['organization'] = self.organization.asdict()
if include_issues:
project_dict['issues'] = [o.asdict() for o in db.session.query(Issue).filter(Issue.project_id == project_dict['id']).all()]
return project_dict
tbl = Project.__table__
# Index the tsvector column
db.Index('index_project_tsv_body', tbl.c.tsv_body, postgresql_using='gin')
# Trigger to populate the search index column
trig_ddl = DDL("""
DROP FUNCTION IF EXISTS project_search_trigger();
CREATE FUNCTION project_search_trigger() RETURNS trigger AS $$
begin
new.tsv_body :=
setweight(to_tsvector('pg_catalog.english', coalesce(new.status,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.tags,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.name,'')), 'B') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.description,'')), 'B') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.languages,'')), 'A');
return new;
end
$$ LANGUAGE plpgsql;
CREATE TRIGGER tsvupdate_projects_trigger BEFORE INSERT OR UPDATE ON project FOR EACH ROW EXECUTE PROCEDURE project_search_trigger();
""")
# Initialize the trigger after table is created
event.listen(tbl, 'after_create', trig_ddl.execute_if(dialect='postgresql'))
class Issue(db.Model):
'''
Issues of Civic Tech Projects on Github
'''
# Columns
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.Unicode())
html_url = db.Column(db.Unicode())
body = db.Column(db.Unicode())
keep = db.Column(db.Boolean())
created_at = db.Column(db.DateTime())
updated_at = db.Column(db.DateTime())
# Relationships
# child
project = db.relationship('Project', single_parent=True, cascade='all, delete-orphan', backref=backref("issues", cascade="save-update, delete"))
project_id = db.Column(db.Integer(), db.ForeignKey('project.id', ondelete='CASCADE'), nullable=False, index=True)
# can contain labels (this relationship is defined in the child object)
def __init__(self, title, project_id=None, html_url=None, labels=None, body=None, created_at=None, updated_at=None):
self.title = title
self.html_url = html_url
self.body = body
self.project_id = project_id
self.created_at = created_at
self.updated_at = updated_at
self.keep = True
def api_url(self):
''' API link to itself
'''
return '%s://%s/api/issues/%s' % (request.scheme, request.host, str(self.id))
def asdict(self, include_project=False):
'''
Return issue as a dictionary with some properties tweaked
'''
issue_dict = db.Model.asdict(self)
        # TODO: paged_results also assumes asdict takes this argument; this should be checked and fixed later
if include_project:
issue_dict['project'] = db.session.query(Project).filter(Project.id == self.project_id).first().asdict()
del issue_dict['project']['issues']
del issue_dict['project_id']
# remove fields that don't need to be public
del issue_dict['keep']
# manually convert dates to ISO 8601
issue_dict['created_at'] = convert_datetime_to_iso_8601(issue_dict['created_at'])
issue_dict['updated_at'] = convert_datetime_to_iso_8601(issue_dict['updated_at'])
issue_dict['api_url'] = self.api_url()
issue_dict['labels'] = [l.asdict() for l in self.labels]
return issue_dict
class Label(db.Model):
'''
Issue labels for projects on Github
'''
# Columns
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.Unicode())
color = db.Column(db.Unicode())
url = db.Column(db.Unicode())
# Relationships
# child
issue = db.relationship('Issue', single_parent=True, cascade='all, delete-orphan', backref=backref("labels", cascade="save-update, delete"))
issue_id = db.Column(db.Integer, db.ForeignKey('issue.id', ondelete='CASCADE'), nullable=False, index=True)
def __init__(self, name, color, url, issue_id=None):
self.name = name
self.color = color
self.url = url
self.issue_id = issue_id
def asdict(self):
'''
Return label as a dictionary with some properties tweaked
'''
label_dict = db.Model.asdict(self)
# remove fields that don't need to be public
del label_dict['id']
del label_dict['issue_id']
return label_dict
class Event(db.Model):
'''
    Organization events from Meetup
'''
# Columns
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.Unicode())
description = db.Column(db.Unicode())
event_url = db.Column(db.Unicode())
location = db.Column(db.Unicode())
created_at = db.Column(db.Unicode())
start_time_notz = db.Column(db.DateTime(False))
end_time_notz = db.Column(db.DateTime(False))
utc_offset = db.Column(db.Integer())
rsvps = db.Column(db.Integer())
keep = db.Column(db.Boolean())
# Relationships
# child
organization = db.relationship('Organization', single_parent=True, cascade='all, delete-orphan', backref=backref("events", cascade="save-update, delete"))
organization_name = db.Column(db.Unicode(), db.ForeignKey('organization.name', ondelete='CASCADE'), nullable=False)
def __init__(self, name, event_url, start_time_notz, created_at, utc_offset,
organization_name, location=None, end_time_notz=None, description=None, rsvps=None):
self.name = name
self.description = description
self.location = location
self.event_url = event_url
self.start_time_notz = start_time_notz
self.utc_offset = utc_offset
self.end_time_notz = end_time_notz
self.organization_name = organization_name
self.created_at = created_at
self.rsvps = rsvps
self.keep = True
def start_time(self):
''' Get a string representation of the start time with UTC offset.
'''
if self.start_time_notz is None:
return None
tz = tzoffset(None, self.utc_offset)
st = self.start_time_notz
dt = datetime(st.year, st.month, st.day, st.hour, st.minute, st.second, tzinfo=tz)
return dt.strftime('%Y-%m-%d %H:%M:%S %z')
def end_time(self):
''' Get a string representation of the end time with UTC offset.
'''
if self.end_time_notz is None:
return None
tz = tzoffset(None, self.utc_offset)
et = self.end_time_notz
dt = datetime(et.year, et.month, et.day, et.hour, et.minute, et.second, tzinfo=tz)
return dt.strftime('%Y-%m-%d %H:%M:%S %z')
def api_url(self):
''' API link to itself
'''
return '%s://%s/api/events/%s' % (request.scheme, request.host, str(self.id))
def asdict(self, include_organization=False):
''' Return Event as a dictionary, with some properties tweaked.
Optionally include linked organization.
'''
event_dict = db.Model.asdict(self)
# remove fields that don't need to be public
for key in ('keep', 'start_time_notz', 'end_time_notz', 'utc_offset'):
del event_dict[key]
for key in ('start_time', 'end_time', 'api_url'):
event_dict[key] = getattr(self, key)()
if include_organization:
event_dict['organization'] = self.organization.asdict()
return event_dict
class Attendance(db.Model):
''' Attendance at organization events
sourced from the peopledb
'''
# Columns
organization_url = db.Column(db.Unicode(), primary_key=True)
total = db.Column(db.Integer())
weekly = db.Column(JsonType())
# Relationship
organization = db.relationship('Organization', single_parent=True, cascade='all, delete-orphan', backref=backref("attendance", cascade="save-update, delete"))
organization_name = db.Column(db.Unicode(), db.ForeignKey('organization.name', ondelete='CASCADE'), nullable=False)
def __init__(self, organization_url, organization_name, total, weekly):
self.organization_url = organization_url
self.organization_name = organization_name
self.total = total
self.weekly = weekly
class Error(db.Model):
'''
Errors from run_update.py
'''
# Columns
id = db.Column(db.Integer(), primary_key=True)
error = db.Column(db.Unicode())
time = db.Column(db.DateTime(False))
|
|
#!/usr/bin/env python3
"""This script is used to create the experimental data look up tables for the
atom class.
In order to run this script simply needs to know where you want the generated
files to live.
usage:
python3 generate_atomicinfo.py <output_directory>
"""
#Knobs that you may want to tweak
pm2bohr = 52.917721067
####################### Begin Script #################################
import os
import sys
import re
if len(sys.argv) != 2:
print("Usage: generate_atomicinfo.py <output_directory>")
quit(1)
outbase = sys.argv[1]
mypath = os.path.dirname(os.path.realpath(__file__))
datadir = os.path.join(mypath, "data")
atomicinfo = {}
#File containing the names and symbols of the atoms
name_file = os.path.join(datadir, "ElementNames.txt")
#File containing the masses of the atoms
mass_file = os.path.join(datadir, "CIAAW-MASSES.formatted.txt")
#File containing the covalent radii of the atoms
cov_file = os.path.join(datadir,"CovRadii.txt")
#File containing the Van der Waals radii of the atoms
vdw_file = os.path.join(datadir,"VanDerWaalRadius.txt")
#File containing the isotope masses
iso_file = os.path.join(datadir, "CIAAW-ISOTOPEMASSES.formatted.txt")
#File containing the isotope abundances
iso_ab_file = os.path.join(datadir, "CIAAW-ABUNDANCE.formatted.txt")
#File containing the multiplicities of each atom
mult_file = os.path.join(datadir, "NIST-ATOMICION.formatted.txt")
#Read in names and make initial entries
for l in open(name_file).readlines():
    l = l.strip()
z, sym, name = l.split()
z = int(z)
atomicinfo[z] = { "sym" : sym,
"name" : name,
"mult": 0,
"termsym": "x",
"mass": (0.0, 0.0, 0.0),
"covradius":0.0,
"vdwradius":0.0,
"isos" : {}
}
# Read in experimental masses
for l in open(mass_file).readlines()[5:]:
    l = l.strip()
z, sym, mid, low, high = l.split()
z = int(z)
atomicinfo[z]["mass"] = ( mid, low, high )
#Read in Covalent Radii
for l in open(cov_file).readlines()[1:]:
    l = l.strip()
z,r,unit=l.split()
z=int(z)
r=float(r)
atomicinfo[z]["covradius"]=r/pm2bohr
#Read in van der waal Radii
for l in open(vdw_file).readlines()[1:]:
    l = l.strip()
z,r,unit=l.split()
z=int(z)
r=float(r)
atomicinfo[z]["vdwradius"]=r/pm2bohr
# Read in isotope masses
for l in open(iso_file).readlines()[5:]:
    l = l.strip()
z, sym, isonum, mid, low, high = l.split()
z = int(z)
if not isonum in atomicinfo[z]["isos"]:
atomicinfo[z]["isos"][isonum] = { }
atomicinfo[z]["isos"][isonum]["mass"] = (mid, low, high)
# Read in isotope abundances
for l in open(iso_ab_file).readlines()[5:]:
    l = l.strip()
z, sym, isonum, mid, low, high = l.split()
z = int(z)
if not isonum in atomicinfo[z]["isos"]:
atomicinfo[z]["isos"][isonum] = { }
atomicinfo[z]["isos"][isonum]["abundance"] = (mid, low, high)
# Fill in missing isotope info
for z, atom in atomicinfo.items():
for isonum, isodat in atom["isos"].items():
if not "mass" in isodat:
isodat["mass"] = (isonum, isonum, isonum)
if not "abundance" in isodat:
isodat["abundance"] = (0, 0, 0)
# Read in multiplicities
for l in open(mult_file).readlines()[5:]:
z, occ, mult, termsym = l.split()
z = int(z)
atomicinfo[z]["mult"] = int(mult)
atomicinfo[z]["termsym"] = termsym
header_file = os.path.join(outbase,"AtomicInfo.hpp")
src_file = os.path.join(outbase,"AtomicInfo.cpp")
comment ="""
/** \\file Declares structures containing basic experimental data.
*
* \warning This file is automatically generated via generate_atomicinfo.py.
* DO NOT EDIT!!!
*/
"""
with open(header_file,'w') as f:
f.write("#pragma once\n")
f.write("#include <string>\n")
f.write("#include <unordered_map>\n")
f.write("#include <vector>\n")
f.write(comment+'\n')
f.write("namespace LibChemist {\n")
f.write("namespace detail_ {\n\n")
f.write("struct IsotopeData {\n")
f.write(" size_t isonum; //! Isotope number (Z + number of neutrons)\n")
f.write(" double mass; //! Mass of the isotope\n")
f.write(" double mass_low; //! Lower bound of the isotope mass\n")
f.write(" double mass_high; //! Upper bound of the isotope mass\n")
f.write(" double abund; //! Natural abundance of the isotope (out of 1)\n")
f.write(" double abund_low; //! Lower bound on the isotope's abundance\n")
f.write(" double abund_high; //! Upper bound on the isotope's abundance\n")
f.write("};\n\n")
f.write("/*! \brief Information about an atom/element */\n")
f.write("struct AtomicData {\n")
f.write(" size_t Z; //! Atomic Z-number (number of protons)\n")
f.write(" std::string sym; //! Element's atomic symbol\n")
f.write(" std::string name; //! Full name of the element\n")
f.write(" int multiplicity; //! Ground-state multiplicity\n")
f.write(" std::string termsym; //! Term symbol character\n")
f.write(" double mass; //! Mass of the element (isotope masses weighted by abundance)\n")
f.write(" double mass_low; //! Lower bound on the mass of the element\n")
f.write(" double mass_high; //! Upper bound on the mass of the element\n")
f.write(" double covradius; //! Covalent radius in a.u.\n")
f.write(" double vdwradius; //! VDW radius in a.u.\n")
f.write(" std::vector<IsotopeData> isotopes; //!< All isotope information for this atom\n")
f.write("};\n\n")
f.write("extern const std::unordered_map<size_t, std::string> Z2sym_;\n\n")
f.write("extern const std::unordered_map<std::string, size_t> sym2Z_;\n\n")
f.write("extern const std::unordered_map<size_t, AtomicData> atomic_data_;\n\n")
f.write("/** \\brief Returns the most common isotope number for the atom with atomic number \p Z\n")
f.write(" *\n")
f.write(" * \\param[in] Z The atomic number for which you want the most common isotope\n")
f.write(" * \\returns The isotope number (protons+neutrons) of the most common isotope\n")
f.write(" * \\throws std::out_of_range if we do not have data for \p Z\n")
f.write(" */\n")
f.write("size_t most_common_isotope(size_t Z);\n\n")
f.write("/** \\brief Returns the mass of a given isotope number\n")
f.write(" *\n")
f.write(" * \\param[in] Z The atomic number of the atom\n")
f.write(" * \\param[in] isonum The isotope number (protons+neutrons)\n")
f.write(" * \\returns The mass of the desired isomer, in Daltons\n")
f.write(" * \\throws std::out_of_range if we do not have data for \p Z or if\n")
f.write(" * there is no data for that isotope number.\n")
f.write(" */\n")
f.write("double isotope_mass(size_t Z, size_t isonum);\n\n")
f.write("}}//End namespaces\n")
with open(src_file,'w') as f:
f.write("#include \"LibChemist/lut/AtomicInfo.hpp\"\n")
f.write("#include <algorithm>\n\n")
f.write("namespace LibChemist {\n")
f.write("namespace detail_ {\n")
# First, atomic Z to symbol map
f.write("extern const std::unordered_map<size_t, std::string> Z2sym_{\n")
for k,v in sorted(atomicinfo.items()):
f.write(" {{ {} , \"{}\" }},\n".format(k, v["sym"]))
f.write("}; // close Z2sym_\n\n\n")
# Next, atomic symbol to Z
f.write("extern const std::unordered_map<std::string, size_t> sym2Z_{\n")
for k,v in sorted(atomicinfo.items()):
f.write(" {{ \"{}\" , {} }},\n".format(v["sym"], k))
f.write("}; // close sym2Z_\n\n\n")
# Next, full atomic data
f.write("extern const std::unordered_map<size_t, AtomicData> atomic_data_{\n")
for k,v in sorted(atomicinfo.items()):
f.write(" {{ {:<4} , {{ {},\n".format(k, k))
f.write(" \"{}\",\n".format(v["sym"]))
f.write(" \"{}\",\n".format(v["name"]))
f.write(" {},\n".format(v["mult"]))
f.write(" \"{}\",\n".format(v["termsym"]))
f.write(" {},\n".format(v["mass"][0]))
f.write(" {},\n".format(v["mass"][1]))
f.write(" {},\n".format(v["mass"][2]))
f.write(" {},\n".format(v["covradius"]))
f.write(" {},\n".format(v["vdwradius"]))
# isotope info
f.write(" {\n")
for ki,vi in sorted(v["isos"].items()):
f.write(" {{ {}, {}, {}, {}, {}, {}, {} }},\n".
format(ki,vi["mass"][0],vi["mass"][1],vi["mass"][2],
vi["abundance"][0], vi["abundance"][1],
vi["abundance"][2]
)
)
f.write(" },\n") # Closes isotope vector
f.write(" },\n") # Closes atomic data
f.write(" },\n\n") # closes map pair
f.write("}; // close atomic_Z_data_\n\n")
f.write("size_t most_common_isotope(size_t Z){\n")
f.write(" const auto& ad=atomic_data_.at(Z);\n")
f.write(" auto maxit = std::max_element(\n")
f.write(" ad.isotopes.begin(), ad.isotopes.end(),\n")
f.write(" [](const IsotopeData & idat1, const IsotopeData & idat2)\n")
f.write(" { return idat1.abund < idat2.abund; });\n")
f.write(" return maxit->isonum;\n}\n\n")
f.write("double isotope_mass(size_t Z, size_t isonum){\n")
f.write(" const auto& ad=atomic_data_.at(Z);\n")
f.write(" for(const auto& x: ad.isotopes)\n")
f.write(" if(x.isonum==isonum)return x.mass;\n")
f.write(" throw std::out_of_range(\"Isotope number is not in range\");\n}\n\n")
f.write("}}//End namespaces\n")
|
|
# Copyright (c) 2007-8, Playful Invention Company.
# Copyright (c) 2008-11, Walter Bender
# Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gtk
from math import pi
import os
import pango
import cairo
import pangocairo
from .tautils import get_path
from .taconstants import (Color, TMP_SVG_PATH, DEFAULT_PEN_COLOR,
DEFAULT_BACKGROUND_COLOR, DEFAULT_FONT)
def wrap100(n):
''' A variant on mod... 101 -> 99; 199 -> 1 '''
n = int(n)
n %= 200
if n > 99:
n = 199 - n
return n
def calc_shade(c, s, invert=False):
''' Convert a color to the current shade (lightness/darkness). '''
# Assumes 16 bit input values
if invert:
if s == -1:
return int(c)
elif s < 0:
return int(c / (1 + s))
return int((c - 65536 * s) / (1 - s))
else:
if s < 0:
return int(c * (1 + s))
return int(c + (65536 - c) * s)
def calc_gray(c, g, invert=False):
    ''' Gray is a pseudo-saturation calculation. '''
# Assumes 16 bit input values
if g == 100:
return int(c)
if invert:
if g == 0:
return int(c)
else:
return int(((c * 100) - (32768 * (100 - g))) / g)
else:
return int(((c * g) + (32768 * (100 - g))) / 100)
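# Worked examples for the two helpers above (16-bit channel values):
#
#   calc_shade(0xff00, 0.5)   # -> 65408: lightened halfway toward white
#   calc_shade(0xff00, -0.5)  # -> 32640: darkened halfway toward black
#   calc_gray(0xff00, 0)      # -> 32768: fully desaturated toward mid-gray
#   calc_gray(0xff00, 100)    # -> 65280: unchanged at full saturation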
colors = {}
DEGTOR = pi / 180.
RTODEG = 180. / pi
COLOR_TABLE = (
0xFF0000, 0xFF0D00, 0xFF1A00, 0xFF2600, 0xFF3300,
0xFF4000, 0xFF4D00, 0xFF5900, 0xFF6600, 0xFF7300,
0xFF8000, 0xFF8C00, 0xFF9900, 0xFFA600, 0xFFB300,
0xFFBF00, 0xFFCC00, 0xFFD900, 0xFFE600, 0xFFF200,
0xFFFF00, 0xE6FF00, 0xCCFF00, 0xB3FF00, 0x99FF00,
0x80FF00, 0x66FF00, 0x4DFF00, 0x33FF00, 0x1AFF00,
0x00FF00, 0x00FF0D, 0x00FF1A, 0x00FF26, 0x00FF33,
0x00FF40, 0x00FF4D, 0x00FF59, 0x00FF66, 0x00FF73,
0x00FF80, 0x00FF8C, 0x00FF99, 0x00FFA6, 0x00FFB3,
0x00FFBF, 0x00FFCC, 0x00FFD9, 0x00FFE6, 0x00FFF2,
0x00FFFF, 0x00F2FF, 0x00E6FF, 0x00D9FF, 0x00CCFF,
0x00BFFF, 0x00B3FF, 0x00A6FF, 0x0099FF, 0x008CFF,
0x0080FF, 0x0073FF, 0x0066FF, 0x0059FF, 0x004DFF,
0x0040FF, 0x0033FF, 0x0026FF, 0x001AFF, 0x000DFF,
0x0000FF, 0x0D00FF, 0x1A00FF, 0x2600FF, 0x3300FF,
0x4000FF, 0x4D00FF, 0x5900FF, 0x6600FF, 0x7300FF,
0x8000FF, 0x8C00FF, 0x9900FF, 0xA600FF, 0xB300FF,
0xBF00FF, 0xCC00FF, 0xD900FF, 0xE600FF, 0xF200FF,
0xFF00FF, 0xFF00E6, 0xFF00CC, 0xFF00B3, 0xFF0099,
0xFF0080, 0xFF0066, 0xFF004D, 0xFF0033, 0xFF001A)
class TurtleGraphics:
''' A class for the Turtle graphics canvas '''
def __init__(self, turtle_window, width, height):
''' Create a sprite to hold the canvas. '''
self.turtle_window = turtle_window
self.width = width
self.height = height
self.textsize = 48
self._fgrgb = DEFAULT_PEN_COLOR
self._bgrgb = DEFAULT_BACKGROUND_COLOR
self._font = DEFAULT_FONT
self._shade = 0
self._color = 0
self._gray = 100
self.cr_svg = None # Surface used for saving to SVG
# Build a cairo.Context from a cairo.XlibSurface
self.canvas = cairo.Context(self.turtle_window.turtle_canvas)
cr = gtk.gdk.CairoContext(self.canvas)
cr.set_line_cap(1) # Set the line cap to be round
self.set_pen_size(5)
def setup_svg_surface(self):
''' Set up a surface for saving to SVG '''
svg_surface = cairo.SVGSurface(self.get_svg_path(),
self.width, self.height)
self.svg_surface = svg_surface
self.cr_svg = cairo.Context(svg_surface)
self.cr_svg.set_line_cap(1) # Set the line cap to be round
def get_svg_path(self):
'''We use a separate file for the svg used for generating Sugar icons
'''
if self.turtle_window.running_sugar:
return os.path.join(get_path(self.turtle_window.activity,
'instance'), 'output.svg')
else:
return TMP_SVG_PATH
def fill_polygon(self, poly_points):
''' Draw the polygon... '''
def _fill_polygon(cr, poly_points):
cr.new_path()
for i, p in enumerate(poly_points):
if p[0] == 'move':
if i == len(poly_points) - 1 or \
poly_points[i + 1][0] not in ['rarc', 'larc']:
cr.move_to(p[1], p[2])
elif p[0] == 'rarc':
cr.arc(p[1], p[2], p[3], p[4], p[5])
elif p[0] == 'larc':
cr.arc_negative(p[1], p[2], p[3], p[4], p[5])
else: # line
cr.line_to(p[1], p[2])
cr.close_path()
cr.fill()
_fill_polygon(self.canvas, poly_points)
self.inval()
if self.cr_svg is not None:
_fill_polygon(self.cr_svg, poly_points)
def clearscreen(self):
'''Clear the canvas and reset most graphics attributes to defaults.'''
def _clearscreen(cr):
cr.move_to(0, 0)
self._bgrgb = DEFAULT_BACKGROUND_COLOR
cr.set_source_rgb(self._bgrgb[0] / 255.,
self._bgrgb[1] / 255.,
self._bgrgb[2] / 255.)
cr.rectangle(0, 0, self.width * 2, self.height * 2)
cr.fill()
_clearscreen(self.canvas)
self.inval()
if self.cr_svg is not None:
_clearscreen(self.cr_svg)
def rarc(self, x, y, r, a, heading):
''' draw a clockwise arc '''
def _rarc(cr, x, y, r, a, h):
cr.arc(x, y, r, (h - 180) * DEGTOR, (h - 180 + a) * DEGTOR)
cr.stroke()
_rarc(self.canvas, x, y, r, a, heading)
self.inval()
if self.cr_svg is not None:
_rarc(self.cr_svg, x, y, r, a, heading)
def larc(self, x, y, r, a, heading):
''' draw a counter-clockwise arc '''
def _larc(cr, x, y, r, a, h):
cr.arc_negative(x, y, r, h * DEGTOR, (h - a) * DEGTOR)
cr.stroke()
_larc(self.canvas, x, y, r, a, heading)
self.inval()
if self.cr_svg is not None:
_larc(self.cr_svg, x, y, r, a, heading)
def set_pen_size(self, pen_size):
''' Set the pen size '''
self.canvas.set_line_width(pen_size)
if self.cr_svg is not None:
self.cr_svg.set_line_width(pen_size)
def fillscreen(self, c, s):
''' Deprecated method: Fill screen with color/shade '''
self.fillscreen_with_gray(c, s, self._gray)
def fillscreen_with_gray(self, color, shade, gray):
''' Fill screen with color/shade/gray and reset to defaults '''
save_rgb = self._fgrgb[:]
# Special case for color blocks
if isinstance(color, Color):
if color.color is None:
self._shade = color.shade
else:
self._color = color.color
else:
self._color = color
if isinstance(shade, Color):
self._shade = shade.shade
else:
self._shade = shade
if isinstance(gray, Color):
self._gray = gray.gray
else:
self._gray = gray
if self._gray < 0:
self._gray = 0
if self._gray > 100:
self._gray = 100
self.set_fgcolor(shade=self._shade, gray=self._gray, color=self._color)
self._bgrgb = self._fgrgb[:]
def _fillscreen(cr, rgb, w, h):
cr.set_source_rgb(rgb[0] / 255., rgb[1] / 255., rgb[2] / 255.)
cr.rectangle(0, 0, w * 2, h * 2)
cr.fill()
_fillscreen(self.canvas, self._fgrgb, self.width, self.height)
self.inval()
if self.cr_svg is not None:
_fillscreen(self.cr_svg, self._fgrgb, self.width, self.height)
self._fgrgb = save_rgb[:]
def set_fgcolor(self, shade=None, gray=None, color=None):
''' Set the foreground color '''
if shade is not None:
self._shade = shade
if gray is not None:
self._gray = gray
if color is not None:
self._color = color
sh = (wrap100(self._shade) - 50) / 50.0
rgb = COLOR_TABLE[wrap100(self._color)]
r = (rgb >> 8) & 0xff00
r = calc_gray(r, self._gray)
r = calc_shade(r, sh)
g = rgb & 0xff00
g = calc_gray(g, self._gray)
g = calc_shade(g, sh)
b = (rgb << 8) & 0xff00
b = calc_gray(b, self._gray)
b = calc_shade(b, sh)
self._fgrgb = [r >> 8, g >> 8, b >> 8]
def draw_surface(self, surface, x, y, w, h):
''' Draw a surface '''
def _draw_surface(cr, surface, x, y, w, h):
cc = gtk.gdk.CairoContext(cr)
cc.set_source_surface(surface, x, y)
cc.rectangle(x, y, w, h)
cc.fill()
_draw_surface(self.canvas, surface, x, y, w, h)
self.inval()
if self.cr_svg is not None:
_draw_surface(self.cr_svg, surface, x, y, w, h)
def draw_pixbuf(self, pixbuf, a, b, x, y, w, h, heading):
''' Draw a pixbuf '''
def _draw_pixbuf(cr, pixbuf, a, b, x, y, w, h, heading):
# Build a gtk.gdk.CairoContext from a cairo.Context to access
# the set_source_pixbuf attribute.
cc = gtk.gdk.CairoContext(cr)
cc.save()
# center the rotation on the center of the image
cc.translate(x + w / 2., y + h / 2.)
cc.rotate(heading * DEGTOR)
cc.translate(-x - w / 2., -y - h / 2.)
cc.set_source_pixbuf(pixbuf, x, y)
cc.rectangle(x, y, w, h)
cc.fill()
cc.restore()
_draw_pixbuf(self.canvas, pixbuf, a, b, x, y, w, h, heading)
self.inval()
if self.cr_svg is not None:
_draw_pixbuf(self.cr_svg, pixbuf, a, b, x, y, w, h, heading)
def set_font(self, font_name):
''' Set font used by draw_text '''
self._font = str(font_name)
def draw_text(self, label, x, y, size, width, heading, scale):
''' Draw text '''
def _draw_text(cr, label, x, y, size, width, scale, heading, rgb,
wrap=False):
import textwrap
final_scale = int(size * scale) * pango.SCALE
label = str(label)
if wrap:
label = '\n'.join(textwrap.wrap(label, int(width / scale)))
cc = pangocairo.CairoContext(cr)
pl = cc.create_layout()
fd = pango.FontDescription(self._font)
fd.set_size(final_scale)
pl.set_font_description(fd)
if isinstance(label, (str, unicode)):
pl.set_text(label.replace('\0', ' '))
elif isinstance(label, (float, int)):
pl.set_text(str(label))
else:
pl.set_text(str(label))
pl.set_width(int(width) * pango.SCALE)
cc.save()
cc.translate(x, y)
cc.rotate(heading * DEGTOR)
cr.set_source_rgb(rgb[0] / 255., rgb[1] / 255., rgb[2] / 255.)
cc.update_layout(pl)
cc.show_layout(pl)
cc.restore()
width *= scale
_draw_text(self.canvas, label, x, y, size, width, scale, heading,
self._fgrgb)
self.inval()
if self.cr_svg is not None: # and self.pendown:
_draw_text(self.cr_svg, label, x, y, size, width, scale, heading,
self._fgrgb, wrap=True)
def set_source_rgb(self):
r = self._fgrgb[0] / 255.
g = self._fgrgb[1] / 255.
b = self._fgrgb[2] / 255.
self.canvas.set_source_rgb(r, g, b)
if self.cr_svg is not None:
self.cr_svg.set_source_rgb(r, g, b)
def draw_line(self, x1, y1, x2, y2):
''' Draw a line '''
def _draw_line(cr, x1, y1, x2, y2):
cr.move_to(x1, y1)
cr.line_to(x2, y2)
cr.stroke()
_draw_line(self.canvas, x1, y1, x2, y2)
if self.cr_svg is not None:
_draw_line(self.cr_svg, x1, y1, x2, y2)
self.inval()
def get_color_index(self, r, g, b, a=0):
''' Find the closest palette entry to the rgb triplet '''
if self._shade != 50 or self._gray != 100:
r <<= 8
g <<= 8
b <<= 8
if self._shade != 50:
sh = (wrap100(self._shade) - 50) / 50.
r = calc_shade(r, sh, True)
g = calc_shade(g, sh, True)
b = calc_shade(b, sh, True)
if self._gray != 100:
r = calc_gray(r, self._gray, True)
g = calc_gray(g, self._gray, True)
b = calc_gray(b, self._gray, True)
r >>= 8
g >>= 8
b >>= 8
min_distance = 1000000
closest_color = -1
for i, c in enumerate(COLOR_TABLE):
cr = int((c & 0xff0000) >> 16)
cg = int((c & 0x00ff00) >> 8)
cb = int((c & 0x0000ff))
distance_squared = \
((cr - r) ** 2) + ((cg - g) ** 2) + ((cb - b) ** 2)
if distance_squared == 0:
return i
if distance_squared < min_distance:
min_distance = distance_squared
closest_color = i
return closest_color
def get_pixel(self, x, y):
''' Read the pixel at x, y '''
if self.turtle_window.interactive_mode:
x = int(x)
y = int(y)
w = self.turtle_window.turtle_canvas.get_width()
h = self.turtle_window.turtle_canvas.get_height()
if x < 0 or x > (w - 1) or y < 0 or y > (h - 1):
return (-1, -1, -1, -1)
# create a new 1x1 cairo surface
cs = cairo.ImageSurface(cairo.FORMAT_RGB24, 1, 1)
cr = cairo.Context(cs)
cr.set_source_surface(self.turtle_window.turtle_canvas, -x, -y)
cr.rectangle(0, 0, 1, 1)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.fill()
cs.flush() # ensure all writing is done
pixels = cs.get_data() # Read the pixel
return (ord(pixels[2]), ord(pixels[1]), ord(pixels[0]), 0)
else:
return (-1, -1, -1, -1)
def svg_close(self):
''' Close current SVG graphic '''
self.cr_svg.show_page()
self.svg_surface.flush()
self.svg_surface.finish()
def svg_reset(self):
''' Reset svg flags '''
self.cr_svg = None
def inval(self):
''' Invalidate a region for gtk '''
self.turtle_window.inval_all()
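# A minimal, self-contained sketch of the nearest-palette lookup that
# get_color_index() above performs: scan a table of 0xRRGGBB integers and
# return the entry with the smallest squared RGB distance. The three-entry
# palette below is made up for illustration and is not the real COLOR_TABLE.
def closest_palette_index(r, g, b, color_table=(0xFF0000, 0x00FF00, 0x0000FF)):
    best_index, best_distance = -1, float('inf')
    for i, c in enumerate(color_table):
        cr = (c >> 16) & 0xFF
        cg = (c >> 8) & 0xFF
        cb = c & 0xFF
        d = (cr - r) ** 2 + (cg - g) ** 2 + (cb - b) ** 2
        if d < best_distance:
            best_index, best_distance = i, d
    return best_index
# Example: closest_palette_index(200, 30, 40) -> 0 (nearest to pure red).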
|
|
"""
This module provides functionality for tracking bridge stability metrics, using
the model introduced in [1] and implemented in [2].
[1] Karsten Loesing, An Analysis of Tor Bridge Stability. Technical Report.
The Tor Project, October 2011.
https://metrics.torproject.org/papers/bridge-stability-2011-10-31.pdf
[2] https://gitweb.torproject.org/metrics-tasks/task-4255/SimulateBridgeStability.java
"""
import logging
import bridgedb.Storage
# tunables
weighting_factor = float(19)/float(20)
discountIntervalMillis = long(60*60*12*1000)
class BridgeHistory(object):
""" Record Class that tracks a single Bridge
The fields stored are:
fingerprint, ip, port, weightedUptime, weightedTime, weightedRunLength,
totalRunWeights, lastSeenWithDifferentAddressAndPort,
lastSeenWithThisAddressAndPort, lastDiscountedHistoryValues,
lastUpdatedWeightedTime.
fingerprint The Bridge Fingerprint (unicode)
ip The Bridge IP (unicode)
port The Bridge orport (integer)
weightedUptime Weighted uptime in seconds (long int)
weightedTime Weighted time in seconds (long int)
weightedRunLength Weighted run length of previous addresses or ports in
seconds. (long int)
totalRunWeights Total run weights of previously used addresses or
ports. (float)
lastSeenWithDifferentAddressAndPort
Timestamp in milliseconds when this
bridge was last seen with a different address or port. (long int)
lastSeenWithThisAddressAndPort
Timestamp in milliseconds when this bridge was last seen
with this address and port. (long int)
lastDiscountedHistoryValues:
Timestamp in milliseconds when this bridge was last discounted. (long int)
lastUpdatedWeightedTime:
Timestamp in milliseconds when the weighted time was updated. (long int)
"""
def __init__(self, fingerprint, ip, port,
weightedUptime, weightedTime, weightedRunLength, totalRunWeights,
lastSeenWithDifferentAddressAndPort, lastSeenWithThisAddressAndPort,
lastDiscountedHistoryValues, lastUpdatedWeightedTime):
self.fingerprint = fingerprint
self.ip = ip
self.port = port
self.weightedUptime = long(weightedUptime)
self.weightedTime = long(weightedTime)
self.weightedRunLength = long(weightedRunLength)
self.totalRunWeights = float(totalRunWeights)
self.lastSeenWithDifferentAddressAndPort = \
long(lastSeenWithDifferentAddressAndPort)
self.lastSeenWithThisAddressAndPort = long(lastSeenWithThisAddressAndPort)
self.lastDiscountedHistoryValues = long(lastDiscountedHistoryValues)
self.lastUpdatedWeightedTime = long(lastUpdatedWeightedTime)
def discountWeightedFractionalUptimeAndWeightedTime(self, discountUntilMillis):
""" discount weighted times """
if self.lastDiscountedHistoryValues == 0:
self.lastDiscountedHistoryValues = discountUntilMillis
rounds = self.numDiscountRounds(discountUntilMillis)
if rounds > 0:
discount = lambda x: (weighting_factor**rounds)*x
self.weightedUptime = discount(self.weightedUptime)
self.weightedTime = discount(self.weightedTime)
self.weightedRunLength = discount(self.weightedRunLength)
self.totalRunWeights = discount(self.totalRunWeights)
self.lastDiscountedHistoryValues += discountIntervalMillis * rounds
return rounds
def numDiscountRounds(self, discountUntilMillis):
""" return the number of rounds of discounting needed to bring this
history element current """
result = discountUntilMillis - self.lastDiscountedHistoryValues
result = int(result/discountIntervalMillis)
return max(result,0)
@property
def weightedFractionalUptime(self):
"""Weighted Fractional Uptime"""
if self.weightedTime < 0.0001: return long(0)
return long(10000) * self.weightedUptime / self.weightedTime
@property
def tosa(self):
"""the Time On Same Address (TOSA)"""
return (self.lastSeenWithThisAddressAndPort -
self.lastSeenWithDifferentAddressAndPort) / 1000
@property
def familiar(self):
"""
A bridge is 'familiar' if 1/8 of all active bridges have appeared
more recently than it, or if it has been around for a Weighted Time of 8 days.
"""
# if this bridge has been around longer than 8 days
if self.weightedTime >= long(8 * 24 * 60 * 60):
return True
# Otherwise, return True if self.weightedTime is at least the
# weightedTime of the bridge at the bottom 1/8 of all bridges, sorted
# by weightedTime
db = bridgedb.Storage.getDB()
allWeightedTimes = [ bh.weightedTime for bh in db.getAllBridgeHistory()]
numBridges = len(allWeightedTimes)
logging.debug("Got %d weightedTimes", numBridges)
allWeightedTimes.sort()
if self.weightedTime >= allWeightedTimes[numBridges/8]:
return True
return False
@property
def wmtbac(self):
"""Weighted Mean Time Between Address Change"""
totalRunLength = self.weightedRunLength + \
((self.lastSeenWithThisAddressAndPort -
self.lastSeenWithDifferentAddressAndPort) / long(1000))
totalWeights = self.totalRunWeights + 1.0
if totalWeights < 0.0001: return long(0)
assert isinstance(totalRunLength, long)
assert isinstance(totalWeights, float)
return totalRunLength / totalWeights
def addOrUpdateBridgeHistory(bridge, timestamp):
db = bridgedb.Storage.getDB()
bhe = db.getBridgeHistory(bridge.fingerprint)
if not bhe:
# This is the first status, assume 60 minutes.
secondsSinceLastStatusPublication = long(60*60)
lastSeenWithDifferentAddressAndPort = timestamp * long(1000)
lastSeenWithThisAddressAndPort = timestamp * long(1000)
bhe = BridgeHistory(
bridge.fingerprint, bridge.ip, bridge.orport,
0,#weightedUptime
0,#weightedTime
0,#weightedRunLength
0,# totalRunWeights
lastSeenWithDifferentAddressAndPort, # first timestamp
lastSeenWithThisAddressAndPort,
0,#lastDiscountedHistoryValues,
0,#lastUpdatedWeightedTime
)
# first time we have seen this descriptor
db.updateIntoBridgeHistory(bhe)
# Calculate the seconds since the last parsed status. If this is
# the first status or we haven't seen a status for more than 60
# minutes, assume 60 minutes.
statusPublicationMillis = long(timestamp * 1000)
if (statusPublicationMillis - bhe.lastSeenWithThisAddressAndPort) > 60*60*1000:
secondsSinceLastStatusPublication = long(60*60)
logging.debug("Capping secondsSinceLastStatusPublication to 1 hour")
# otherwise, roll with it
else:
secondsSinceLastStatusPublication = \
(statusPublicationMillis - bhe.lastSeenWithThisAddressAndPort)/1000
if secondsSinceLastStatusPublication <= 0 and bhe.weightedTime > 0:
# old descriptor, bail
logging.warn("Received old descriptor for bridge %s with timestamp %d",
bhe.fingerprint, statusPublicationMillis/1000)
return bhe
# iterate over all known bridges and apply weighting factor
discountAndPruneBridgeHistories(statusPublicationMillis)
# Update the weighted times of bridges
updateWeightedTime(statusPublicationMillis)
# For Running Bridges only:
# compare the stored history against the descriptor and see if the
# bridge has changed its address or port
bhe = db.getBridgeHistory(bridge.fingerprint)
if not bridge.running:
logging.info("%s is not running" % bridge.fingerprint)
return bhe
# Parse the descriptor and see if the address or port changed
# If so, store the weighted run time
if bridge.orport != bhe.port or bridge.ip != bhe.ip:
bhe.totalRunWeights += 1.0
bhe.weightedRunLength += bhe.tosa
bhe.lastSeenWithDifferentAddressAndPort =\
bhe.lastSeenWithThisAddressAndPort
# Regardless of whether the bridge is new, kept or changed
# its address and port, raise its WFU times and note its
# current address and port, and that we saw it using them.
bhe.weightedUptime += secondsSinceLastStatusPublication
bhe.lastSeenWithThisAddressAndPort = statusPublicationMillis
bhe.ip = str(bridge.ip)
bhe.port = bridge.orport
return db.updateIntoBridgeHistory(bhe)
def discountAndPruneBridgeHistories(discountUntilMillis):
db = bridgedb.Storage.getDB()
bhToRemove = []
bhToUpdate = []
for bh in db.getAllBridgeHistory():
# discount previous values by factor of 0.95 every 12 hours
bh.discountWeightedFractionalUptimeAndWeightedTime(discountUntilMillis)
# give the thing at least 24 hours before pruning it
if bh.weightedFractionalUptime < 1 and bh.weightedTime > 60*60*24:
logging.debug("Removing bridge from history: %s" % bh.fingerprint)
bhToRemove.append(bh.fingerprint)
else:
bhToUpdate.append(bh)
for k in bhToUpdate: db.updateIntoBridgeHistory(k)
for k in bhToRemove: db.delBridgeHistory(k)
def updateWeightedTime(statusPublicationMillis):
bhToUpdate = []
db = bridgedb.Storage.getDB()
for bh in db.getBridgesLastUpdatedBefore(statusPublicationMillis):
interval = (statusPublicationMillis - bh.lastUpdatedWeightedTime)/1000
if interval > 0:
bh.weightedTime += min(3600,interval) # cap to 1hr
bh.lastUpdatedWeightedTime = statusPublicationMillis
#db.updateIntoBridgeHistory(bh)
bhToUpdate.append(bh)
for bh in bhToUpdate:
db.updateIntoBridgeHistory(bh)
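# A standalone sketch of the stability arithmetic implemented above, using the
# same constants: history values decay by a factor of 19/20 for every full
# 12-hour interval, and weighted fractional uptime (WFU) is reported on a
# 0..10000 scale. The helper names and sample numbers are illustrative only.
WEIGHTING_FACTOR = 19.0 / 20.0
DISCOUNT_INTERVAL_MILLIS = 60 * 60 * 12 * 1000

def discounted(value, last_discounted_millis, now_millis):
    rounds = max(int((now_millis - last_discounted_millis) / DISCOUNT_INTERVAL_MILLIS), 0)
    return value * (WEIGHTING_FACTOR ** rounds)

def weighted_fractional_uptime(weighted_uptime, weighted_time):
    if weighted_time < 0.0001:
        return 0
    return 10000.0 * weighted_uptime / weighted_time
# Two full 12-hour rounds shrink a value by 0.95 ** 2 = 0.9025:
# discounted(1000, 0, 24 * 60 * 60 * 1000) -> 902.5
# weighted_fractional_uptime(900, 1000) -> 9000.0, i.e. 90% uptime.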
|
|
from bika.lims.content.analysis import Analysis
from bika.lims.testing import BIKA_FUNCTIONAL_TESTING
from bika.lims.tests.base import BikaFunctionalTestCase
from bika.lims.utils.analysisrequest import create_analysisrequest
from bika.lims.workflow import doActionFor
from plone.app.testing import login, logout
from plone.app.testing import TEST_USER_NAME
try:
import unittest2 as unittest
except ImportError: # Python 2.7
import unittest
class TestHiddenAnalyses(BikaFunctionalTestCase):
layer = BIKA_FUNCTIONAL_TESTING
def setUp(self):
super(TestHiddenAnalyses, self).setUp()
login(self.portal, TEST_USER_NAME)
servs = self.portal.bika_setup.bika_analysisservices
# analysis-service-3: Calcium (Ca)
# analysis-service-6: Copper (Cu)
# analysis-service-7: Iron (Fe)
self.services = [servs['analysisservice-3'],
servs['analysisservice-6'],
servs['analysisservice-7']]
# Calcium - Hidden not set
# Copper - Hidden set to False
self.services[1].setHidden(False)
# Iron - Hidden set to True
self.services[2].setHidden(True)
profs = self.portal.bika_setup.bika_analysisprofiles
# analysisprofile-1: Trace Metals
self.analysisprofile = profs['analysisprofile-1']
artemp = self.portal.bika_setup.bika_artemplates
# artemplate-2: Bruma Metals
self.artemplate = artemp['artemplate-2']
def tearDown(self):
# Restore
for s in self.services:
s.setHidden(False)
self.analysisprofile.setAnalysisServicesSettings([])
self.artemplate.setAnalysisServicesSettings([])
logout()
super(TestHiddenAnalyses, self).tearDown()
def test_service_hidden_service(self):
service = self.services[1]
uid = service.UID()
self.assertFalse(service.getHidden())
self.assertFalse(service.Schema().getField('Hidden').get(service))
service.setHidden(False)
self.assertFalse(service.getHidden())
self.assertFalse(service.Schema().getField('Hidden').get(service))
service.setHidden(True)
self.assertTrue(service.getHidden())
self.assertTrue(service.Schema().getField('Hidden').get(service))
# Restore
service.setHidden(False)
def test_service_hidden_profile(self):
# Profile
# For Calcium (unset)
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# For Copper (False)
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# For Iron (True)
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Modify visibility for Calcium in profile
uid = self.services[0].UID()
sets = [{'uid': uid}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Modify visibility for Copper in profile
uid = self.services[1].UID()
sets = [{'uid': uid}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Modify visibility for Iron in profile
uid = self.services[2].UID()
sets = [{'uid': uid}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Restore
self.analysisprofile.setAnalysisServicesSettings([])
def test_service_hidden_artemplate(self):
# Template
# For Calcium (unset)
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# For Copper (False)
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# For Iron (True)
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Modify visibility for Calcium in template
uid = self.services[0].UID()
sets = [{'uid': uid}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Modify visibility for Copper in template
uid = self.services[1].UID()
sets = [{'uid': uid}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Modify visibility for Iron in template
uid = self.services[2].UID()
sets = [{'uid': uid}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Restore
self.artemplate.setAnalysisServicesSettings([])
def test_service_hidden_analysisrequest(self):
# Input results
# Client: Happy Hills
# SampleType: Apple Pulp
# Contact: Rita Mohale
# Analyses: [Calcium, Copper, Iron]
client = self.portal.clients['client-1']
sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
request = {}
services = [s.UID() for s in self.services]
values = {'Client': client.UID(),
'Contact': client.getContacts()[0].UID(),
'SamplingDate': '2015-01-01',
'SampleType': sampletype.UID()}
ar = create_analysisrequest(client, request, values, services)
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
self.assertFalse(ar.isAnalysisServiceHidden(services[0]))
self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
self.assertFalse(ar.isAnalysisServiceHidden(services[1]))
self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
self.assertTrue(ar.isAnalysisServiceHidden(services[2]))
# For Calcium (unset)
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# For Copper (False)
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# For Iron (True)
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# Modify visibility for Calcium in AR
uid = self.services[0].UID()
sets = [{'uid': uid}]
ar.setAnalysisServicesSettings(sets)
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
ar.setAnalysisServicesSettings(sets)
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
ar.setAnalysisServicesSettings(sets)
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
ar.setAnalysisServicesSettings([])
# AR with profile with no changes
values['Profile'] = self.analysisprofile.UID()
ar = create_analysisrequest(client, request, values, services)
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# AR with template with no changes
values['Template'] = self.artemplate
del values['Profile']
ar = create_analysisrequest(client, request, values, services)
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# AR with profile, with changes
values['Profile'] = self.analysisprofile.UID()
del values['Template']
# Expected results matrix: rows are the services (Hidden not set, False,
# True); columns are the profile overrides (hidden=False, hidden=True,
# unset). Sign: positive means the 'hidden' key ends up in the AR settings,
# negative means it is absent. Magnitude: 1 = hidden, 2 = visible.
matrix = [[2, 1, -2], # AS = Not set
[2, 1, -2], # AS = False
[2, 1, -1]] # AS = True
for i in range(len(matrix)):
sets = {'uid': services[i]}
opts = [0, 1, 2]
for j in opts:
if j == 0:
sets['hidden'] = False
elif j == 1:
sets['hidden'] = True
else:
del sets['hidden']
self.analysisprofile.setAnalysisServicesSettings(sets)
ar = create_analysisrequest(client, request, values, services)
res = matrix[i][j]
if res < 0:
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[i]))
else:
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(services[i]))
if abs(res) == 1:
self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
elif abs(res) == 2:
self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
# Restore
self.analysisprofile.setAnalysisServicesSettings([])
# AR with template, with changes
values['Template'] = self.artemplate.UID()
del values['Profile']
# Expected results matrix, encoded as above, but the overrides now come
# from the template rather than the profile.
matrix = [[2, 1, -2], # AS = Not set
[2, 1, -2], # AS = False
[2, 1, -1]] # AS = True
for i in range(len(matrix)):
sets = {'uid': services[i]}
opts = [0, 1, 2]
for j in opts:
if j == 0:
sets['hidden'] = False
elif j == 1:
sets['hidden'] = True
else:
del sets['hidden']
self.artemplate.setAnalysisServicesSettings(sets)
ar = create_analysisrequest(client, request, values, services)
res = matrix[i][j]
if res < 0:
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[i]))
else:
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(services[i]))
if abs(res) == 1:
self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
elif abs(res) == 2:
self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
# Restore
self.artemplate.setAnalysisServicesSettings([])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestHiddenAnalyses))
suite.layer = BIKA_FUNCTIONAL_TESTING
return suite
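# A condensed sketch of the visibility precedence these tests exercise: an
# explicit per-AR 'hidden' setting wins, then a profile/template override,
# then the analysis service's own Hidden flag. The resolver below is
# illustrative only and is not part of the bika.lims API.
def resolve_hidden(service_default, profile_setting=None, ar_setting=None):
    for settings in (ar_setting, profile_setting):
        if settings is not None and 'hidden' in settings:
            return settings['hidden']
    return service_default
# resolve_hidden(True) -> True (service default applies)
# resolve_hidden(True, profile_setting={'hidden': False}) -> False
# resolve_hidden(True, {'hidden': False}, ar_setting={'hidden': True}) -> True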
|
|
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import os
import six
import socket
import unittest
from unittest import SkipTest
import gruvi
from gruvi.vendor import txdbus
from gruvi.dbus import DbusError, DbusMethodCallError
from gruvi.dbus import DbusProtocol, DbusClient, DbusServer
from gruvi.dbus import parse_dbus_header, TxdbusAuthenticator
from gruvi.transports import TransportError
from support import UnitTest, MockTransport
class TestParseDbusHeader(UnitTest):
def test_simple(self):
m = b'l\1\0\1\0\0\0\0\1\0\0\0\0\0\0\0'
self.assertEqual(parse_dbus_header(m), len(m))
def test_big_endian(self):
m = b'B\1\0\1\0\0\0\0\0\0\0\1\0\0\0\0'
self.assertEqual(parse_dbus_header(m), len(m))
def test_header_array(self):
m = b'l\1\0\1\0\0\0\0\1\0\0\0\10\0\0\0h2345678'
self.assertEqual(parse_dbus_header(m), len(m))
for l in range(16, len(m)):
self.assertEqual(parse_dbus_header(m[:l]), len(m))
def test_padding(self):
m = b'l\1\0\1\0\0\0\0\1\0\0\0\11\0\0\0h234567812345678'
self.assertEqual(parse_dbus_header(m), len(m))
for l in range(16, len(m)):
self.assertEqual(parse_dbus_header(m[:l]), len(m))
def test_body_size(self):
m = b'l\1\0\1\4\0\0\0\1\0\0\0\0\0\0\0b234'
self.assertEqual(parse_dbus_header(m), len(m))
for l in range(16, len(m)):
self.assertEqual(parse_dbus_header(m[:l]), len(m))
def test_illegal_endian(self):
m = b'L\1\0\1\0\0\0\0\1\0\0\0\0\0\0\0'
self.assertRaises(ValueError, parse_dbus_header, m)
m = b'b\1\0\1\0\0\0\0\1\0\0\0\0\0\0\0'
self.assertRaises(ValueError, parse_dbus_header, m)
def test_illegal_type(self):
m = b'l\0\0\1\0\0\0\0\1\0\0\0\0\0\0\0'
self.assertRaises(ValueError, parse_dbus_header, m)
m = b'l\5\0\1\0\0\0\0\1\0\0\0\0\0\0\0'
self.assertRaises(ValueError, parse_dbus_header, m)
def test_illegal_serial(self):
m = b'l\1\0\1\0\0\0\0\0\0\0\0\0\0\0\0'
self.assertRaises(ValueError, parse_dbus_header, m)
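# A hedged sketch of how the 16-byte fixed D-Bus header exercised above
# decodes, following the D-Bus wire format: endianness flag, message type,
# flags, protocol version, body length, serial, and the length of the
# variable header-field array. The total message size is the fixed part plus
# the field array (padded to an 8-byte boundary) plus the body. This is an
# illustration, not gruvi's parse_dbus_header implementation.
import struct

def message_length_from_header(header16):
    endian = '<' if header16[:1] == b'l' else '>'
    msg_type, flags, version = struct.unpack(endian + 'BBB', header16[1:4])
    body_len, serial, fields_len = struct.unpack(endian + 'III', header16[4:16])
    padded_fields = (fields_len + 7) & ~7  # header fields pad to an 8-byte boundary
    return 16 + padded_fields + body_len
# message_length_from_header(b'l\1\0\1\0\0\0\0\1\0\0\0\11\0\0\0') -> 32,
# matching test_padding above (9-byte field array padded to 16, empty body).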
class TestDbusProtocol(UnitTest):
def setUp(self):
super(TestDbusProtocol, self).setUp()
self.messages = []
self.protocols = []
def store_messages(self, message, transport, protocol):
self.messages.append(message)
self.protocols.append(protocol)
def store_and_echo_messages(self, message, transport, protocol):
self.messages.append(message)
self.protocols.append(protocol)
response = txdbus.SignalMessage(message.path, message.member, message.interface,
signature=message.signature, body=message.body)
protocol.send_message(response)
def test_auth_missing_creds_byte(self):
# The first thing a client should send to the server is a '\0' byte. If
# not, the server should close the connection.
transport = MockTransport()
protocol = DbusProtocol(server_side=True)
transport.start(protocol)
self.assertFalse(transport._closed.is_set())
protocol.data_received(b'\1')
self.assertIsInstance(protocol._error, DbusError)
self.assertTrue(transport._closed.is_set())
def test_auth_non_ascii(self):
# After the '\0' byte, an authentication phase happens. The
# authentication protocol is line based and all lines should be ASCII.
transport = MockTransport()
protocol = DbusProtocol(server_side=True)
transport.start(protocol)
self.assertFalse(transport._closed.is_set())
protocol.data_received(b'\0\xff\r\n')
self.assertIsInstance(protocol._error, DbusError)
self.assertTrue(transport._closed.is_set())
def test_auth_long_line(self):
# An authentication line should not exceed the maximum line size.
transport = MockTransport()
protocol = DbusProtocol(server_side=True, server_guid='foo')
protocol.max_line_size = 5
transport.start(protocol)
self.assertFalse(transport._closed.is_set())
protocol.data_received(b'\0AUTH ANONYMOUS\r\n')
self.assertIsInstance(protocol._error, DbusError)
self.assertTrue(transport._closed.is_set())
def test_auth_ok(self):
# Test anonymous authentication. Ensure that the server GUID is
# correctly sent back.
transport = MockTransport()
protocol = DbusProtocol(server_side=True, server_guid='foo')
transport.start(protocol)
protocol.data_received(b'\0AUTH ANONYMOUS\r\nBEGIN\r\n')
buf = transport.buffer.getvalue()
self.assertTrue(buf.startswith(b'OK foo'))
auth = protocol._authenticator
self.assertTrue(auth.authenticationSucceeded())
self.assertEqual(auth.getGUID(), 'foo')
self.assertFalse(transport._closed.is_set())
def test_missing_hello(self):
# After authentication, the first message should be a "Hello".
# Otherwise, the server should close the connection.
transport = MockTransport()
protocol = DbusProtocol(self.store_messages, server_side=True, server_guid='foo')
transport.start(protocol)
protocol.data_received(b'\0AUTH ANONYMOUS\r\nBEGIN\r\n')
message = txdbus.MethodCallMessage('/my/path', 'Method')
auth = protocol._authenticator
self.assertTrue(auth.authenticationSucceeded())
protocol.data_received(message.rawMessage)
self.assertIsInstance(protocol._error, DbusError)
self.assertTrue(transport._closed.is_set())
def test_send_message(self):
# After the "Hello" message, it should be possible to send other
# messages.
transport = MockTransport()
protocol = DbusProtocol(self.store_messages, server_side=True, server_guid='foo')
transport.start(protocol)
protocol.data_received(b'\0AUTH ANONYMOUS\r\nBEGIN\r\n')
auth = protocol._authenticator
self.assertTrue(auth.authenticationSucceeded())
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
interface='org.freedesktop.DBus', destination='org.freedesktop.DBus')
protocol.data_received(message.rawMessage)
gruvi.sleep(0)
self.assertIsNone(protocol._error)
self.assertFalse(transport._closed.is_set())
self.assertTrue(protocol._name_acquired)
self.assertEqual(len(self.messages), 0)
message = txdbus.MethodCallMessage('/my/path', 'Method')
protocol.data_received(message.rawMessage)
gruvi.sleep(0)
self.assertIsNone(protocol._error)
self.assertFalse(transport._closed.is_set())
self.assertEqual(len(self.messages), 1)
self.assertEqual(self.messages[0].path, '/my/path')
self.assertEqual(self.messages[0].member, 'Method')
self.assertEqual(self.protocols, [protocol])
def test_send_message_incremental(self):
# Send a message byte by byte. The protocol should be able to process it.
transport = MockTransport()
protocol = DbusProtocol(self.store_messages, server_side=True, server_guid='foo')
transport.start(protocol)
authexchange = b'\0AUTH ANONYMOUS\r\nBEGIN\r\n'
for i in range(len(authexchange)):
protocol.data_received(authexchange[i:i+1])
auth = protocol._authenticator
self.assertTrue(auth.authenticationSucceeded())
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
interface='org.freedesktop.DBus', destination='org.freedesktop.DBus')
for i in range(len(message.rawMessage)):
protocol.data_received(message.rawMessage[i:i+1])
gruvi.sleep(0)
self.assertIsNone(protocol._error)
self.assertFalse(transport._closed.is_set())
self.assertEqual(len(self.messages), 0)
message = txdbus.MethodCallMessage('/my/path', 'Method')
for i in range(len(message.rawMessage)):
protocol.data_received(message.rawMessage[i:i+1])
gruvi.sleep(0)
self.assertIsNone(protocol._error)
self.assertFalse(transport._closed.is_set())
self.assertEqual(len(self.messages), 1)
self.assertEqual(self.messages[0].path, '/my/path')
self.assertEqual(self.messages[0].member, 'Method')
self.assertEqual(self.protocols, [protocol])
def test_send_message_too_large(self):
# Send a message that exceeds the maximum message size. The connection
# should be closed.
transport = MockTransport()
protocol = DbusProtocol(self.store_messages, server_side=True, server_guid='foo')
transport.start(protocol)
protocol.data_received(b'\0AUTH ANONYMOUS\r\nBEGIN\r\n')
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
interface='org.freedesktop.DBus', destination='org.freedesktop.DBus')
protocol.data_received(message.rawMessage)
gruvi.sleep(0)
self.assertTrue(protocol._name_acquired)
# Send a signal with a size equal to the maximum message size. This should work.
message = txdbus.SignalMessage('/my/path', 'Signal', 'my.iface',
signature='s', body=['x'*100])
msglen = len(message.rawMessage)
self.assertGreater(msglen, 100)
protocol.max_message_size = msglen
protocol.data_received(message.rawMessage)
gruvi.sleep(0)
self.assertIsNone(protocol._error)
self.assertFalse(transport._closed.is_set())
self.assertEqual(len(self.messages), 1)
# Now send a signal with a size larger than the maximum message size. This should fail.
message = txdbus.SignalMessage('/my/path', 'Signal', 'my.iface',
signature='s', body=['x'*100])
msglen = len(message.rawMessage)
protocol.max_message_size = msglen-1
protocol.data_received(message.rawMessage)
gruvi.sleep(0)
self.assertIsInstance(protocol._error, DbusError)
self.assertTrue(transport._closed.is_set())
self.assertEqual(len(self.messages), 1)
def test_read_write_flow_control(self):
# Send a lot of messages filling up the protocol read buffer.
transport = MockTransport()
protocol = DbusProtocol(self.store_and_echo_messages, server_side=True)
transport.start(protocol)
protocol.data_received(b'\0AUTH ANONYMOUS\r\nBEGIN\r\n')
auth = protocol._authenticator
self.assertTrue(auth.authenticationSucceeded())
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
interface='org.freedesktop.DBus', destination='org.freedesktop.DBus')
protocol.data_received(message.rawMessage)
gruvi.sleep(0)
self.assertTrue(protocol._name_acquired.is_set())
interrupted = 0
message = txdbus.SignalMessage('/my/path', 'Signal', 'my.iface',
signature='s', body=['x'*100])
msglen = len(message.rawMessage)
protocol.max_queue_size = 10
transport.drain()
transport.set_write_buffer_limits(7*msglen)
for i in range(100):
# Fill up protocol message queue
protocol.data_received(message.rawMessage)
if transport._reading:
continue
interrupted += 1
self.assertEqual(protocol._queue.qsize(), 10)
# Run the dispatcher to fill up the transport write buffer
gruvi.sleep(0)
# Now the write buffer is full and the read buffer still contains
# some entries because it is larger.
self.assertGreater(protocol._queue.qsize(), 0)
self.assertFalse(transport._can_write.is_set())
transport.drain()
# Should be interrupted > 10 times. The write buffer is the limiting factor
# not the read buffer.
self.assertGreater(interrupted, 10)
def echo_app(message, transport, protocol):
# Test application that echoes D-Bus arguments
if not isinstance(message, txdbus.MethodCallMessage):
return
if message.member == 'Echo':
reply = txdbus.MethodReturnMessage(message.serial, signature=message.signature,
body=message.body)
elif message.member == 'Error':
reply = txdbus.ErrorMessage('Echo.Error', message.serial, signature=message.signature,
body=message.body)
else:
return
protocol.send_message(reply)
class TestGruviDbus(UnitTest):
def setUp(self):
super(TestGruviDbus, self).setUp()
TxdbusAuthenticator.cookie_dir = self.tempdir
def test_auth_pipe(self):
# Test that authentication works over a Pipe.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
cproto = client.protocol
cauth = cproto._authenticator
sproto = list(server.connections)[0][1]
sauth = sproto._authenticator
self.assertTrue(cauth.authenticationSucceeded())
self.assertTrue(sauth.authenticationSucceeded())
self.assertIsInstance(cproto.server_guid, six.text_type)
self.assertTrue(cproto.server_guid.isalnum())
self.assertEqual(cproto.server_guid, cauth.getGUID())
self.assertEqual(cproto.server_guid, sproto.server_guid)
self.assertEqual(sproto.server_guid, sauth.getGUID())
self.assertEqual(cauth.getMechanismName(), sauth.getMechanismName())
if hasattr(socket, 'SO_PEERCRED'):
self.assertEqual(cauth.getMechanismName(), 'EXTERNAL')
elif hasattr(os, 'fork'):
self.assertEqual(cauth.getMechanismName(), 'DBUS_COOKIE_SHA1')
else:
self.assertEqual(cauth.getMechanismName(), 'ANONYMOUS')
client.close()
server.close()
def test_auth_tcp(self):
# Test that authentication works over TCP
server = DbusServer(echo_app)
addr = 'tcp:host=127.0.0.1,port=0'
server.listen(addr)
client = DbusClient()
client.connect(server.addresses[0])
cproto = client.protocol
cauth = cproto._authenticator
sproto = list(server.connections)[0][1]
sauth = sproto._authenticator
self.assertTrue(cauth.authenticationSucceeded())
self.assertTrue(sauth.authenticationSucceeded())
self.assertIsInstance(cproto.server_guid, six.text_type)
self.assertTrue(cproto.server_guid.isalnum())
self.assertEqual(cproto.server_guid, cauth.getGUID())
self.assertEqual(cproto.server_guid, sproto.server_guid)
self.assertEqual(sproto.server_guid, sauth.getGUID())
self.assertEqual(cauth.getMechanismName(), sauth.getMechanismName())
if hasattr(os, 'fork'):
self.assertEqual(cauth.getMechanismName(), 'DBUS_COOKIE_SHA1')
else:
self.assertEqual(cauth.getMechanismName(), 'ANONYMOUS')
client.close()
server.close()
def test_get_unique_name(self):
# Ensure that get_unique_name() works client and server side
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
unique_name = client.get_unique_name()
self.assertIsInstance(unique_name, six.text_type)
self.assertTrue(unique_name.startswith(':'))
sproto = list(server.connections)[0][1]
self.assertEqual(unique_name, sproto.get_unique_name())
server.close()
client.close()
def test_call_method(self):
# Ensure that calling a method over a Unix socket works.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
result = client.call_method('bus.name', '/path', 'my.iface', 'Echo')
self.assertEqual(result, ())
server.close()
client.close()
def test_call_method_tcp(self):
# Ensure that calling a method over TCP works.
server = DbusServer(echo_app)
addr = 'tcp:host=127.0.0.1,port=0'
server.listen(addr)
client = DbusClient()
client.connect(server.addresses[0])
result = client.call_method('bus.name', '/path', 'my.iface', 'Echo')
self.assertEqual(result, ())
server.close()
client.close()
def test_call_method_str_args(self):
# Ensure that calling a method with string arguments works.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
result = client.call_method('bus.name', '/path', 'my.iface', 'Echo',
signature='s', args=['foo'])
self.assertEqual(result, ('foo',))
result = client.call_method('bus.name', '/path', 'my.iface', 'Echo',
signature='ss', args=['foo', 'bar'])
self.assertEqual(result, ('foo', 'bar'))
server.close()
client.close()
def test_call_method_int_args(self):
# Ensure that calling a method with integer arguments works.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
result = client.call_method('bus.name', '/path', 'my.iface', 'Echo',
signature='i', args=[1])
self.assertEqual(result, (1,))
result = client.call_method('bus.name', '/path', 'my.iface', 'Echo',
signature='ii', args=[1, 2])
self.assertEqual(result, (1, 2))
server.close()
client.close()
def test_call_method_error(self):
# Ensure that a method can return an error and that in this case a
# DbusMethodCallError is raised.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
exc = self.assertRaises(DbusMethodCallError, client.call_method,
'bus.name', '/path', 'my.iface', 'Error')
self.assertEqual(exc.error, 'Echo.Error')
self.assertEqual(exc.args, ())
server.close()
client.close()
def test_call_method_error_args(self):
# Call a method that will return an error with arguments. The arguments
# should be available from the exception.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
exc = self.assertRaises(DbusMethodCallError, client.call_method,
'bus.name', '/path', 'my.iface', 'Error',
signature='ss', args=('foo', 'bar'))
self.assertEqual(exc.error, 'Echo.Error')
self.assertEqual(exc.args, ('foo', 'bar'))
server.close()
client.close()
def test_send_garbage(self):
# Send random garbage and ensure the connection gets dropped.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
exc = None
try:
while True:
chunk = os.urandom(100)
client.transport.write(chunk)
gruvi.sleep(0)
except Exception as e:
exc = e
self.assertIsInstance(exc, TransportError)
server.close()
client.close()
def test_connection_limit(self):
# Establish more connections than the DBUS server is willing to accept.
# The connections should be closed.
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
server.max_connections = 2
clients = []
exc = None
try:
for i in range(4):
client = DbusClient()
client.connect(addr)
clients.append(client)
except Exception as e:
exc = e
self.assertIsInstance(exc, TransportError)
self.assertEqual(len(server.connections), server.max_connections)
for client in clients:
client.close()
server.close()
class TestNativeDbus(UnitTest):
def setUp(self):
if not os.environ.get('DBUS_SESSION_BUS_ADDRESS'):
raise SkipTest('D-BUS session bus not available')
super(TestNativeDbus, self).setUp()
def test_get_unique_name(self):
# Ensure that get_unique_name() works
client = DbusClient()
client.connect('session')
unique_name = client.get_unique_name()
self.assertIsInstance(unique_name, six.text_type)
self.assertTrue(unique_name.startswith(':'))
client.close()
def test_call_listnames(self):
# Call the ListNames() bus method and ensure the results are a list of
# strings.
client = DbusClient()
client.connect('session')
result = client.call_method('org.freedesktop.DBus', '/org/freedesktop/DBus',
'org.freedesktop.DBus', 'ListNames')
self.assertIsInstance(result, tuple)
self.assertEqual(len(result), 1)
names = result[0]
self.assertIsInstance(names, list)
self.assertGreater(len(names), 0)
for name in names:
self.assertIsInstance(name, six.text_type)
client.close()
if __name__ == '__main__':
os.environ.setdefault('VERBOSE', '1')
unittest.main()
|
|
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from time import time
from simplejson import dumps
from thingsboard_gateway.connectors.converter import log
from thingsboard_gateway.connectors.ftp.ftp_converter import FTPConverter
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class FTPUplinkConverter(FTPConverter):
def __init__(self, config):
self.__config = config
self.__data_types = {"attributes": "attributes", "timeseries": "telemetry"}
def _get_required_data(self, left_symbol, right_symbol):
dict_result = {"deviceName": None, "deviceType": None, "attributes": [], "telemetry": []}
get_device_name_from_data = False
get_device_type_from_data = False
if left_symbol in self.__config['devicePatternName'] and right_symbol in self.__config['devicePatternName']:
get_device_name_from_data = True
else:
dict_result['deviceName'] = self.__config['devicePatternName']
if left_symbol in self.__config['devicePatternType'] and right_symbol in self.__config['devicePatternType']:
get_device_type_from_data = True
else:
dict_result['deviceType'] = self.__config['devicePatternType']
return dict_result, get_device_name_from_data, get_device_type_from_data
def _convert_table_view_data(self, config, data):
dict_result, get_device_name_from_data, get_device_type_from_data = self._get_required_data('${', '}')
try:
for data_type in self.__data_types:
for information in self.__config[data_type]:
arr = data.split(self.__config['delimiter'])
key_index = information['key']
val_index = information['value']
if '${' in information['key'] and '}' in information['key']:
key_index = config['headers'].index(re.sub(r'[^\w]', '', information['key']))
if '${' in information['value'] and '}' in information['value']:
val_index = config['headers'].index(re.sub(r'[^\w]', '', information['value']))
dict_result[self.__data_types[data_type]].append({
arr[key_index] if isinstance(key_index, int) else key_index:
arr[val_index] if isinstance(val_index, int) else val_index
})
if get_device_name_from_data:
index = config['headers'].index(re.sub(r'[^\w]', '', self.__config['devicePatternName']))
dict_result['deviceName'] = arr[index]
if get_device_type_from_data:
index = config['headers'].index(re.sub(r'[^\w]', '', self.__config['devicePatternType']))
dict_result['deviceType'] = arr[index]
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), data)
log.exception(e)
return dict_result
@staticmethod
def _get_key_or_value(key, arr):
if '[' in key and ']' in key:
split_val_arr = key[1:-1].split(':')
first_val_index = split_val_arr[0] or 0
last_val_index = split_val_arr[1] or len(arr)
return arr[int(first_val_index):int(last_val_index)][0]
else:
return key
def _convert_slices_view_data(self, data):
dict_result, get_device_name_from_data, get_device_type_from_data = self._get_required_data('[', ']')
try:
for data_type in self.__data_types:
for information in self.__config[data_type]:
arr = data.split(self.__config['delimiter'])
val = self._get_key_or_value(information['value'], arr)
key = self._get_key_or_value(information['key'], arr)
dict_result[self.__data_types[data_type]].append({key: val})
if get_device_name_from_data:
if self.__config['devicePatternName'] == information['value']:
dict_result['deviceName'] = val
if get_device_type_from_data:
if self.__config['devicePatternType'] == information['value']:
dict_result['deviceType'] = val
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), data)
log.exception(e)
return dict_result
def _convert_json_file(self, data):
dict_result = {"deviceName": None, "deviceType": None, "attributes": [], "telemetry": []}
try:
if self.__config.get("devicePatternName") is not None:
device_name_tags = TBUtility.get_values(self.__config.get("devicePatternName"), data,
get_tag=True)
device_name_values = TBUtility.get_values(self.__config.get("devicePatternName"), data,
expression_instead_none=True)
dict_result["deviceName"] = self.__config.get("devicePatternName")
for (device_name_tag, device_name_value) in zip(device_name_tags, device_name_values):
is_valid_key = "${" in self.__config.get("devicePatternName") and "}" in \
self.__config.get("devicePatternName")
dict_result['deviceName'] = dict_result['deviceName'].replace('${' + str(device_name_tag) + '}',
str(device_name_value)) \
if is_valid_key else device_name_tag
else:
log.error("The expression for looking \"deviceName\" not found in config %s", dumps(self.__config))
if self.__config.get("devicePatternType") is not None:
device_type_tags = TBUtility.get_values(self.__config.get("devicePatternType"), data,
get_tag=True)
device_type_values = TBUtility.get_values(self.__config.get("devicePatternType"), data,
expression_instead_none=True)
dict_result["deviceType"] = self.__config.get("devicePatternType")
for (device_type_tag, device_type_value) in zip(device_type_tags, device_type_values):
is_valid_key = "${" in self.__config.get("devicePatternType") and "}" in \
self.__config.get("devicePatternType")
dict_result["deviceType"] = dict_result["deviceType"].replace('${' + str(device_type_tag) + '}',
str(device_type_value)) \
if is_valid_key else device_type_tag
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), data)
log.exception(e)
try:
for datatype in self.__data_types:
dict_result[self.__data_types[datatype]] = []
for datatype_config in self.__config.get(datatype, []):
values = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
expression_instead_none=True)
values_tags = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
get_tag=True)
keys = TBUtility.get_values(datatype_config["key"], data, datatype_config["type"],
expression_instead_none=True)
keys_tags = TBUtility.get_values(datatype_config["key"], data, get_tag=True)
full_key = datatype_config["key"]
for (key, key_tag) in zip(keys, keys_tags):
is_valid_key = "${" in datatype_config["key"] and "}" in \
datatype_config["key"]
full_key = full_key.replace('${' + str(key_tag) + '}',
str(key)) if is_valid_key else key_tag
full_value = datatype_config["value"]
for (value, value_tag) in zip(values, values_tags):
is_valid_value = "${" in datatype_config["value"] and "}" in \
datatype_config["value"]
full_value = full_value.replace('${' + str(value_tag) + '}',
str(value)) if is_valid_value else str(value)
if datatype == 'timeseries' and (
data.get("ts") is not None or data.get("timestamp") is not None):
dict_result[self.__data_types[datatype]].append(
{"ts": data.get('ts', data.get('timestamp', int(time()))),
'values': {full_key: full_value}})
else:
dict_result[self.__data_types[datatype]].append({full_key: full_value})
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), str(data))
log.exception(e)
return dict_result
def convert(self, config, data):
if config['file_ext'] == 'csv' or (
config['file_ext'] == 'txt' and self.__config['txt_file_data_view'] == 'TABLE'):
return self._convert_table_view_data(config, data)
elif config['file_ext'] == 'txt' and self.__config['txt_file_data_view'] == 'SLICED':
return self._convert_slices_view_data(data)
elif config['file_ext'] == 'json':
return self._convert_json_file(data)
else:
raise Exception('Incorrect file extension or file data view mode')
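# A minimal standalone sketch of the "SLICED" lookup performed by
# _get_key_or_value() above: a token such as '[1:2]' selects a slice of the
# delimited line and the first element of that slice is used, while any other
# token is taken literally. The sample line and indices are illustrative only.
def get_key_or_value(token, fields):
    if token.startswith('[') and token.endswith(']'):
        start, end = token[1:-1].split(':')
        start = int(start) if start else 0
        end = int(end) if end else len(fields)
        return fields[start:end][0]
    return token

fields = 'DEV-001,temperature,23.4'.split(',')
# get_key_or_value('[1:2]', fields) -> 'temperature'
# get_key_or_value('humidity', fields) -> 'humidity' (taken literally)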
|
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
expect_warnings,
get_params,
gettestcases,
expect_info_dict,
try_rm,
report_warning,
)
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.compat import (
compat_http_client,
compat_urllib_error,
compat_HTTPError,
)
from youtube_dl.utils import (
DownloadError,
ExtractorError,
format_bytes,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
RETRIES = 3
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
self.to_stderr = self.to_screen
self.processed_info_dicts = []
super(YoutubeDL, self).__init__(*args, **kwargs)
def report_warning(self, message):
# Don't accept warnings during tests
raise ExtractorError(message)
def process_info(self, info_dict):
self.processed_info_dicts.append(info_dict)
return super(YoutubeDL, self).process_info(info_dict)
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
defs = gettestcases()
class TestDownload(unittest.TestCase):
# Parallel testing in nosetests. See
# http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
_multiprocess_shared_ = True
maxDiff = None
def __str__(self):
"""Identify each test with the `add_ie` attribute, if available."""
def strclass(cls):
"""From 2.7's unittest; 2.6 had _strclass so we can't import it."""
return '%s.%s' % (cls.__module__, cls.__name__)
add_ie = getattr(self, self._testMethodName).add_ie
return '%s (%s)%s:' % (self._testMethodName,
strclass(self.__class__),
' [%s]' % add_ie if add_ie else '')
def setUp(self):
self.defs = defs
# Dynamically generate tests
def generator(test_case, tname):
def test_template(self):
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
is_playlist = any(k.startswith('playlist') for k in test_case)
test_cases = test_case.get(
'playlist', [] if is_playlist else [test_case])
def print_skipping(reason):
print('Skipping %s: %s' % (test_case['name'], reason))
if not ie.working():
print_skipping('IE marked as not _WORKING')
return
for tc in test_cases:
info_dict = tc.get('info_dict', {})
if not (info_dict.get('id') and info_dict.get('ext')):
raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
if 'skip' in test_case:
print_skipping(test_case['skip'])
return
for other_ie in other_ies:
if not other_ie.working():
print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
return
params = get_params(test_case.get('params', {}))
params['outtmpl'] = tname + '_' + params['outtmpl']
if is_playlist and 'playlist' not in test_case:
params.setdefault('extract_flat', 'in_playlist')
params.setdefault('skip_download', True)
ydl = YoutubeDL(params, auto_init=False)
ydl.add_default_info_extractors()
finished_hook_called = set()
def _hook(status):
if status['status'] == 'finished':
finished_hook_called.add(status['filename'])
ydl.add_progress_hook(_hook)
expect_warnings(ydl, test_case.get('expected_warnings', []))
def get_tc_filename(tc):
return ydl.prepare_filename(tc.get('info_dict', {}))
res_dict = None
def try_rm_tcs_files(tcs=None):
if tcs is None:
tcs = test_cases
for tc in tcs:
tc_filename = get_tc_filename(tc)
try_rm(tc_filename)
try_rm(tc_filename + '.part')
try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
try_rm_tcs_files()
try:
try_num = 1
while True:
try:
# We're not using .download here since that is just a shim
# for outside error handling, and returns the exit code
# instead of the result dict.
res_dict = ydl.extract_info(
test_case['url'],
force_generic_extractor=params.get('force_generic_extractor', False))
except (DownloadError, ExtractorError) as err:
# Check if the exception is not a network related one
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
raise
if try_num == RETRIES:
report_warning('%s failed due to network errors, skipping...' % tname)
return
print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
try_num += 1
else:
break
if is_playlist:
self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
self.assertTrue('entries' in res_dict)
expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
if 'playlist_mincount' in test_case:
assertGreaterEqual(
self,
len(res_dict['entries']),
test_case['playlist_mincount'],
'Expected at least %d in playlist %s, but got only %d' % (
test_case['playlist_mincount'], test_case['url'],
len(res_dict['entries'])))
if 'playlist_count' in test_case:
self.assertEqual(
len(res_dict['entries']),
test_case['playlist_count'],
'Expected %d entries in playlist %s, but got %d.' % (
test_case['playlist_count'],
test_case['url'],
len(res_dict['entries']),
))
if 'playlist_duration_sum' in test_case:
got_duration = sum(e['duration'] for e in res_dict['entries'])
self.assertEqual(
test_case['playlist_duration_sum'], got_duration)
# Generalize both playlists and single videos to unified format for
# simplicity
if 'entries' not in res_dict:
res_dict['entries'] = [res_dict]
for tc_num, tc in enumerate(test_cases):
tc_res_dict = res_dict['entries'][tc_num]
# First, check test cases' data against extracted data alone
expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
# Now, check downloaded file consistency
tc_filename = get_tc_filename(tc)
if not test_case.get('params', {}).get('skip_download', False):
self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
self.assertTrue(tc_filename in finished_hook_called)
expected_minsize = tc.get('file_minsize', 10000)
if expected_minsize is not None:
if params.get('test'):
expected_minsize = max(expected_minsize, 10000)
got_fsize = os.path.getsize(tc_filename)
assertGreaterEqual(
self, got_fsize, expected_minsize,
'Expected %s to be at least %s, but it\'s only %s ' %
(tc_filename, format_bytes(expected_minsize),
format_bytes(got_fsize)))
if 'md5' in tc:
md5_for_file = _file_md5(tc_filename)
self.assertEqual(tc['md5'], md5_for_file)
# Finally, check test cases' data again but this time against
# extracted data from info JSON file written during processing
info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
self.assertTrue(
os.path.exists(info_json_fn),
'Missing info file %s' % info_json_fn)
with io.open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof)
expect_info_dict(self, info_dict, tc.get('info_dict', {}))
finally:
try_rm_tcs_files()
if is_playlist and res_dict is not None and res_dict.get('entries'):
# Remove all other files that may have been extracted if the
# extractor returns full results even with extract_flat
res_tcs = [{'info_dict': e} for e in res_dict['entries']]
try_rm_tcs_files(res_tcs)
return test_template
# And add them to TestDownload
for n, test_case in enumerate(defs):
tname = 'test_' + str(test_case['name'])
i = 1
while hasattr(TestDownload, tname):
tname = 'test_%s_%d' % (test_case['name'], i)
i += 1
test_method = generator(test_case, tname)
test_method.__name__ = str(tname)
ie_list = test_case.get('add_ie')
test_method.add_ie = ie_list and ','.join(ie_list)
setattr(TestDownload, test_method.__name__, test_method)
del test_method
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo.config import cfg
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
import nova.openstack.common.rpc.proxy
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
"""Client side of the conductor RPC API
API version history:
1.0 - Initial version.
1.1 - Added migration_update
1.2 - Added instance_get_by_uuid and instance_get_all_by_host
1.3 - Added aggregate_host_add and aggregate_host_delete
1.4 - Added migration_get
1.5 - Added bw_usage_update
1.6 - Added get_backdoor_port()
1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
1.9 - Added provider_fw_rule_get_all
1.10 - Added agent_build_get_by_triple
1.11 - Added aggregate_get
1.12 - Added block_device_mapping_update_or_create
1.13 - Added block_device_mapping_get_all_by_instance
1.14 - Added block_device_mapping_destroy
1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
1.16 - Added instance_destroy
1.17 - Added instance_info_cache_delete
1.18 - Added instance_type_get
1.19 - Added vol_get_usage_by_time and vol_usage_update
1.20 - Added migration_get_unconfirmed_by_dest_compute
1.21 - Added service_get_all_by
1.22 - Added ping
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
1.26 - Added instance_info_cache_update
1.27 - Added service_create
1.28 - Added binary arg to service_get_all_by
1.29 - Added service_destroy
1.30 - Added migration_create
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
1.34 - Added service_update
1.35 - Added instance_get_active_by_window_joined
1.36 - Added instance_fault_create
1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
1.38 - Added service name to instance_update
1.39 - Added notify_usage_exists
1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
1.43 - Added compute_stop
1.44 - Added compute_node_delete
1.45 - Added project_id to quota_commit and quota_rollback
1.46 - Added compute_confirm_resize
1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
1.48 - Added compute_unrescue
... Grizzly supports message version 1.48. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.48.
1.49 - Added columns_to_join to instance_get_by_uuid
1.50 - Added object_action() and object_class_action()
1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
1.52 - Pass instance objects for compute_confirm_resize
1.53 - Added compute_reboot
1.54 - Added 'update_cells' argument to bw_usage_update
1.55 - Pass instance objects for compute_stop
"""
BASE_RPC_API_VERSION = '1.0'
VERSION_ALIASES = {
'grizzly': '1.48',
}
def __init__(self):
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
super(ConductorAPI, self).__init__(
topic=CONF.conductor.topic,
default_version=self.BASE_RPC_API_VERSION,
serializer=objects_base.NovaObjectSerializer(),
version_cap=version_cap)
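# Illustrative note (not part of the original module): the version cap above
# is resolved from the [upgrade_levels] section of nova.conf. A rough sketch,
# assuming CONF.upgrade_levels.conductor is set:
#   conductor = 'grizzly'  ->  version_cap = '1.48' (via VERSION_ALIASES)
#   conductor = '1.50'     ->  version_cap = '1.50' (passed through unchanged)
#   conductor unset        ->  version_cap = None   (no cap)
# With a cap of '1.48', can_send_version('1.49') and newer return False, so
# the methods below fall back to their older message formats.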
def instance_update(self, context, instance_uuid, updates,
service=None):
updates_p = jsonutils.to_primitive(updates)
return self.call(context,
self.make_msg('instance_update',
instance_uuid=instance_uuid,
updates=updates_p,
service=service),
version='1.38')
def instance_get(self, context, instance_id):
msg = self.make_msg('instance_get',
instance_id=instance_id)
return self.call(context, msg, version='1.24')
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
if self.can_send_version('1.49'):
version = '1.49'
msg = self.make_msg('instance_get_by_uuid',
instance_uuid=instance_uuid,
columns_to_join=columns_to_join)
else:
version = '1.2'
msg = self.make_msg('instance_get_by_uuid',
instance_uuid=instance_uuid)
return self.call(context, msg, version=version)
def migration_get(self, context, migration_id):
msg = self.make_msg('migration_get', migration_id=migration_id)
return self.call(context, msg, version='1.4')
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
msg = self.make_msg('migration_get_unconfirmed_by_dest_compute',
confirm_window=confirm_window,
dest_compute=dest_compute)
return self.call(context, msg, version='1.20')
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
msg = self.make_msg('migration_get_in_progress_by_host_and_node',
host=host, node=node)
return self.call(context, msg, version='1.31')
def migration_create(self, context, instance, values):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('migration_create', instance=instance_p,
values=values)
return self.call(context, msg, version='1.30')
def migration_update(self, context, migration, status):
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('migration_update', migration=migration_p,
status=status)
return self.call(context, msg, version='1.1')
def aggregate_host_add(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_host_add', aggregate=aggregate_p,
host=host)
return self.call(context, msg, version='1.3')
def aggregate_host_delete(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_host_delete', aggregate=aggregate_p,
host=host)
return self.call(context, msg, version='1.3')
def aggregate_get(self, context, aggregate_id):
msg = self.make_msg('aggregate_get', aggregate_id=aggregate_id)
return self.call(context, msg, version='1.11')
def aggregate_get_by_host(self, context, host, key=None):
msg = self.make_msg('aggregate_get_by_host', host=host, key=key)
return self.call(context, msg, version='1.7')
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_metadata_add', aggregate=aggregate_p,
metadata=metadata,
set_delete=set_delete)
return self.call(context, msg, version='1.7')
def aggregate_metadata_delete(self, context, aggregate, key):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_metadata_delete', aggregate=aggregate_p,
key=key)
return self.call(context, msg, version='1.7')
def aggregate_metadata_get_by_host(self, context, host, key):
msg = self.make_msg('aggregate_metadata_get_by_host', host=host,
key=key)
return self.call(context, msg, version='1.42')
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None, update_cells=True):
msg_kwargs = dict(uuid=uuid, mac=mac, start_period=start_period,
bw_in=bw_in, bw_out=bw_out, last_ctr_in=last_ctr_in,
last_ctr_out=last_ctr_out,
last_refreshed=last_refreshed)
if self.can_send_version('1.54'):
version = '1.54'
msg_kwargs['update_cells'] = update_cells
else:
version = '1.5'
msg = self.make_msg('bw_usage_update', **msg_kwargs)
return self.call(context, msg, version=version)
def security_group_get_by_instance(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('security_group_get_by_instance',
instance=instance_p)
return self.call(context, msg, version='1.8')
def security_group_rule_get_by_security_group(self, context, secgroup):
secgroup_p = jsonutils.to_primitive(secgroup)
msg = self.make_msg('security_group_rule_get_by_security_group',
secgroup=secgroup_p)
return self.call(context, msg, version='1.8')
def provider_fw_rule_get_all(self, context):
msg = self.make_msg('provider_fw_rule_get_all')
return self.call(context, msg, version='1.9')
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
msg = self.make_msg('agent_build_get_by_triple',
hypervisor=hypervisor, os=os,
architecture=architecture)
return self.call(context, msg, version='1.10')
def block_device_mapping_update_or_create(self, context, values,
create=None):
msg = self.make_msg('block_device_mapping_update_or_create',
values=values, create=create)
return self.call(context, msg, version='1.12')
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
instance_p = jsonutils.to_primitive(instance)
if self.can_send_version('1.51'):
version = '1.51'
msg = self.make_msg('block_device_mapping_get_all_by_instance',
instance=instance_p, legacy=legacy)
elif legacy:
# If the remote side is >= 1.51, it defaults to legacy=True.
# If it's older, it only understands the legacy format.
version = '1.13'
msg = self.make_msg('block_device_mapping_get_all_by_instance',
instance=instance_p)
else:
# If we require new style data, but can't ask for it, then we must
# fail here.
raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
return self.call(context, msg, version=version)
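# Illustrative sketch (an assumption for clarity, not original code): the
# branching above is the version-negotiation pattern used throughout this
# proxy. For example, with version_cap='1.48':
#   block_device_mapping_get_all_by_instance(ctxt, instance)
#       -> can_send_version('1.51') is False and legacy is True, so the
#          message is sent at version '1.13' without the 'legacy' argument.
#   block_device_mapping_get_all_by_instance(ctxt, instance, legacy=False)
#       -> raises rpc_common.RpcVersionCapError, because new-style data
#          cannot be requested from a conductor that old.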
def block_device_mapping_destroy(self, context, bdms=None,
instance=None, volume_id=None,
device_name=None):
bdms_p = jsonutils.to_primitive(bdms)
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('block_device_mapping_destroy',
bdms=bdms_p,
instance=instance_p, volume_id=volume_id,
device_name=device_name)
return self.call(context, msg, version='1.14')
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join=None):
msg = self.make_msg('instance_get_all_by_filters',
filters=filters, sort_key=sort_key,
sort_dir=sort_dir, columns_to_join=columns_to_join)
return self.call(context, msg, version='1.47')
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
msg = self.make_msg('instance_get_active_by_window_joined',
begin=begin, end=end, project_id=project_id,
host=host)
return self.call(context, msg, version='1.35')
def instance_destroy(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_destroy', instance=instance_p)
self.call(context, msg, version='1.16')
def instance_info_cache_delete(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_info_cache_delete', instance=instance_p)
self.call(context, msg, version='1.17')
def instance_type_get(self, context, instance_type_id):
msg = self.make_msg('instance_type_get',
instance_type_id=instance_type_id)
return self.call(context, msg, version='1.18')
def vol_get_usage_by_time(self, context, start_time):
start_time_p = jsonutils.to_primitive(start_time)
msg = self.make_msg('vol_get_usage_by_time', start_time=start_time_p)
return self.call(context, msg, version='1.19')
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('vol_usage_update', vol_id=vol_id, rd_req=rd_req,
rd_bytes=rd_bytes, wr_req=wr_req,
wr_bytes=wr_bytes,
instance=instance_p, last_refreshed=last_refreshed,
update_totals=update_totals)
return self.call(context, msg, version='1.19')
def service_get_all_by(self, context, topic=None, host=None, binary=None):
msg = self.make_msg('service_get_all_by', topic=topic, host=host,
binary=binary)
return self.call(context, msg, version='1.28')
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
msg = self.make_msg('instance_get_all_by_host', host=host, node=node,
columns_to_join=columns_to_join)
return self.call(context, msg, version='1.47')
def instance_fault_create(self, context, values):
msg = self.make_msg('instance_fault_create', values=values)
return self.call(context, msg, version='1.36')
def action_event_start(self, context, values):
values_p = jsonutils.to_primitive(values)
msg = self.make_msg('action_event_start', values=values_p)
return self.call(context, msg, version='1.25')
def action_event_finish(self, context, values):
values_p = jsonutils.to_primitive(values)
msg = self.make_msg('action_event_finish', values=values_p)
return self.call(context, msg, version='1.25')
def instance_info_cache_update(self, context, instance, values):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_info_cache_update',
instance=instance_p,
values=values)
return self.call(context, msg, version='1.26')
def service_create(self, context, values):
msg = self.make_msg('service_create', values=values)
return self.call(context, msg, version='1.27')
def service_destroy(self, context, service_id):
msg = self.make_msg('service_destroy', service_id=service_id)
return self.call(context, msg, version='1.29')
def compute_node_create(self, context, values):
msg = self.make_msg('compute_node_create', values=values)
return self.call(context, msg, version='1.33')
def compute_node_update(self, context, node, values, prune_stats=False):
node_p = jsonutils.to_primitive(node)
msg = self.make_msg('compute_node_update', node=node_p, values=values,
prune_stats=prune_stats)
return self.call(context, msg, version='1.33')
def compute_node_delete(self, context, node):
node_p = jsonutils.to_primitive(node)
msg = self.make_msg('compute_node_delete', node=node_p)
return self.call(context, msg, version='1.44')
def service_update(self, context, service, values):
service_p = jsonutils.to_primitive(service)
msg = self.make_msg('service_update', service=service_p, values=values)
return self.call(context, msg, version='1.34')
def task_log_get(self, context, task_name, begin, end, host, state=None):
msg = self.make_msg('task_log_get', task_name=task_name,
begin=begin, end=end, host=host, state=state)
return self.call(context, msg, version='1.37')
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
msg = self.make_msg('task_log_begin_task', task_name=task_name,
begin=begin, end=end, host=host,
task_items=task_items, message=message)
return self.call(context, msg, version='1.37')
def task_log_end_task(self, context, task_name, begin, end, host, errors,
message=None):
msg = self.make_msg('task_log_end_task', task_name=task_name,
begin=begin, end=end, host=host, errors=errors,
message=message)
return self.call(context, msg, version='1.37')
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
instance_p = jsonutils.to_primitive(instance)
system_metadata_p = jsonutils.to_primitive(system_metadata)
extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
msg = self.make_msg('notify_usage_exists', instance=instance_p,
current_period=current_period,
ignore_missing_network_data=ignore_missing_network_data,
system_metadata=system_metadata_p,
extra_usage_info=extra_usage_info_p)
return self.call(context, msg, version='1.39')
def security_groups_trigger_handler(self, context, event, args):
args_p = jsonutils.to_primitive(args)
msg = self.make_msg('security_groups_trigger_handler', event=event,
args=args_p)
return self.call(context, msg, version='1.40')
def security_groups_trigger_members_refresh(self, context, group_ids):
msg = self.make_msg('security_groups_trigger_members_refresh',
group_ids=group_ids)
return self.call(context, msg, version='1.40')
def network_migrate_instance_start(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('network_migrate_instance_start',
instance=instance_p, migration=migration_p)
return self.call(context, msg, version='1.41')
def network_migrate_instance_finish(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('network_migrate_instance_finish',
instance=instance_p, migration=migration_p)
return self.call(context, msg, version='1.41')
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
reservations_p = jsonutils.to_primitive(reservations)
msg = self.make_msg('quota_commit', reservations=reservations_p,
project_id=project_id, user_id=user_id)
return self.call(context, msg, version='1.45')
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
reservations_p = jsonutils.to_primitive(reservations)
msg = self.make_msg('quota_rollback', reservations=reservations_p,
project_id=project_id, user_id=user_id)
return self.call(context, msg, version='1.45')
def get_ec2_ids(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('get_ec2_ids', instance=instance_p)
return self.call(context, msg, version='1.42')
def compute_confirm_resize(self, context, instance, migration_ref):
migration_p = jsonutils.to_primitive(migration_ref)
if not self.can_send_version('1.52'):
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.46'
else:
version = '1.52'
msg = self.make_msg('compute_confirm_resize', instance=instance,
migration_ref=migration_p)
return self.call(context, msg, version=version)
def compute_unrescue(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('compute_unrescue', instance=instance_p)
return self.call(context, msg, version='1.48')
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
msg = self.make_msg('object_class_action', objname=objname,
objmethod=objmethod, objver=objver,
args=args, kwargs=kwargs)
return self.call(context, msg, version='1.50')
def object_action(self, context, objinst, objmethod, args, kwargs):
msg = self.make_msg('object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
return self.call(context, msg, version='1.50')
class ComputeTaskAPI(nova.openstack.common.rpc.proxy.RpcProxy):
"""Client side of the conductor 'compute' namespaced RPC API
API version history:
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
1.2 - Added build_instances
1.3 - Added unshelve_instance
1.4 - Added reservations to migrate_server.
"""
BASE_RPC_API_VERSION = '1.0'
RPC_API_NAMESPACE = 'compute_task'
def __init__(self):
super(ComputeTaskAPI, self).__init__(
topic=CONF.conductor.topic,
default_version=self.BASE_RPC_API_VERSION,
serializer=objects_base.NovaObjectSerializer())
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None):
instance_p = jsonutils.to_primitive(instance)
flavor_p = jsonutils.to_primitive(flavor)
msg = self.make_msg('migrate_server', instance=instance_p,
scheduler_hint=scheduler_hint, live=live, rebuild=rebuild,
flavor=flavor_p, block_migration=block_migration,
disk_over_commit=disk_over_commit, reservations=reservations)
return self.call(context, msg, version='1.4')
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping):
instances_p = [jsonutils.to_primitive(inst) for inst in instances]
image_p = jsonutils.to_primitive(image)
msg = self.make_msg('build_instances', instances=instances_p,
image=image_p, filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping)
self.cast(context, msg, version='1.2')
def unshelve_instance(self, context, instance):
msg = self.make_msg('unshelve_instance', instance=instance)
self.cast(context, msg, version='1.3')
|
|
#!/usr/bin/env python3
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import sys
import time
import zipfile
import javac_output_processor
from util import build_utils
from util import md5_check
from util import jar_info_utils
from util import server_utils
_JAVAC_EXTRACTOR = os.path.join(build_utils.DIR_SOURCE_ROOT, 'third_party',
'android_prebuilts', 'build_tools', 'common',
'framework', 'javac_extractor.jar')
# Add a check here to cause the suggested fix to be applied while compiling.
# Use this when trying to enable more checks.
ERRORPRONE_CHECKS_TO_APPLY = []
# Full list of checks: https://errorprone.info/bugpatterns
ERRORPRONE_WARNINGS_TO_DISABLE = [
# Temporarily disabling to roll doubledown.
# TODO(wnwen): Re-enable this upstream.
'InlineMeInliner',
# The following are super useful, but existing issues need to be fixed first
# before they can start failing the build on new errors.
'InvalidParam',
'InvalidLink',
'InvalidInlineTag',
'EmptyBlockTag',
'PublicConstructorForAbstractClass',
'InvalidBlockTag',
'StaticAssignmentInConstructor',
'MutablePublicArray',
'UnescapedEntity',
'NonCanonicalType',
'AlmostJavadoc',
'ReturnValueIgnored',
# The following are added for errorprone update: https://crbug.com/1216032
'InlineMeSuggester',
'DoNotClaimAnnotations',
'JavaUtilDate',
'IdentityHashMapUsage',
'UnnecessaryMethodReference',
'LongFloatConversion',
'CharacterGetNumericValue',
'ErroneousThreadPoolConstructorChecker',
'StaticMockMember',
'MissingSuperCall',
'ToStringReturnsNull',
# If possible, this should be automatically fixed if turned on:
'MalformedInlineTag',
# TODO(crbug.com/834807): Follow steps in bug
'DoubleBraceInitialization',
# TODO(crbug.com/834790): Follow steps in bug.
'CatchAndPrintStackTrace',
# TODO(crbug.com/801210): Follow steps in bug.
'SynchronizeOnNonFinalField',
# TODO(crbug.com/802073): Follow steps in bug.
'TypeParameterUnusedInFormals',
# TODO(crbug.com/803484): Follow steps in bug.
'CatchFail',
# TODO(crbug.com/803485): Follow steps in bug.
'JUnitAmbiguousTestClass',
# Android platform default is always UTF-8.
# https://developer.android.com/reference/java/nio/charset/Charset.html#defaultCharset()
'DefaultCharset',
# Low priority since there are lots of tags that don't fit this check.
'UnrecognisedJavadocTag',
# Low priority since the alternatives still work.
'JdkObsolete',
# We don't use that many lambdas.
'FunctionalInterfaceClash',
# There are lots of times when we just want to post a task.
'FutureReturnValueIgnored',
# Nice to be explicit about operators, but not necessary.
'OperatorPrecedence',
# Just false positives in our code.
'ThreadJoinLoop',
# Low priority corner cases with String.split.
# Linking Guava and using Splitter was rejected
# in the https://chromium-review.googlesource.com/c/chromium/src/+/871630.
'StringSplitter',
# Preferred to use another method since it propagates exceptions better.
'ClassNewInstance',
# Nice to have static inner classes but not necessary.
'ClassCanBeStatic',
# Explicit is better than implicit.
'FloatCast',
# Results in false positives.
'ThreadLocalUsage',
# Also just false positives.
'Finally',
# False positives for Chromium.
'FragmentNotInstantiable',
# Low priority to fix.
'HidingField',
# Low priority.
'IntLongMath',
# Low priority.
'BadComparable',
# Low priority.
'EqualsHashCode',
# Nice to fix but low priority.
'TypeParameterShadowing',
# Good to have immutable enums, also low priority.
'ImmutableEnumChecker',
# False positives for testing.
'InputStreamSlowMultibyteRead',
# Nice to have better primitives.
'BoxedPrimitiveConstructor',
# Not necessary for tests.
'OverrideThrowableToString',
# Nice to have better type safety.
'CollectionToArraySafeParameter',
# Makes logcat debugging more difficult, and does not provide obvious
# benefits in the Chromium codebase.
'ObjectToString',
# Triggers on private methods that are @CalledByNative.
'UnusedMethod',
# Triggers on generated R.java files.
'UnusedVariable',
# Not that useful.
'UnsafeReflectiveConstructionCast',
# Not that useful.
'MixedMutabilityReturnType',
# Nice to have.
'EqualsGetClass',
# A lot of false-positives from CharSequence.equals().
'UndefinedEquals',
# Nice to have.
'ExtendingJUnitAssert',
# Nice to have.
'SystemExitOutsideMain',
# Nice to have.
'TypeParameterNaming',
# Nice to have.
'UnusedException',
# Nice to have.
'UngroupedOverloads',
# Nice to have.
'InconsistentOverloads',
# Dagger generated code triggers this.
'SameNameButDifferent',
# Nice to have.
'UnnecessaryLambda',
# Nice to have.
'UnnecessaryAnonymousClass',
# Nice to have.
'LiteProtoToString',
# Nice to have.
'MissingSummary',
# Nice to have.
'ReturnFromVoid',
# Nice to have.
'EmptyCatch',
# Nice to have.
'BadImport',
# Nice to have.
'UseCorrectAssertInTests',
# Nice to have.
'InlineFormatString',
# Nice to have.
'DefaultPackage',
# Must be off since we are now passing in annotation processor generated
# code as a source jar (deduplicating work with turbine).
'RefersToDaggerCodegen',
# We already have presubmit checks for this. Not necessary to warn on
# every build.
'RemoveUnusedImports',
# We do not care about unnecessary parenthesis enough to check for them.
'UnnecessaryParentheses',
# The only time we trigger this is when it is better to be explicit in a
# list of unicode characters, e.g. FindAddress.java
'UnicodeEscape',
# Nice to have.
'AlreadyChecked',
]
# Full list of checks: https://errorprone.info/bugpatterns
# Only those marked as "experimental" need to be listed here in order to be
# enabled.
ERRORPRONE_WARNINGS_TO_ENABLE = [
'BinderIdentityRestoredDangerously',
'EmptyIf',
'EqualsBrokenForNull',
'InvalidThrows',
'LongLiteralLowerCaseSuffix',
'MultiVariableDeclaration',
'RedundantOverride',
'StaticQualifiedUsingExpression',
'StringEquality',
'TimeUnitMismatch',
'UnnecessaryStaticImport',
'UseBinds',
'WildcardImport',
]
def ProcessJavacOutput(output, target_name):
# These warnings cannot be suppressed even for third party code. Deprecation
# warnings especially do not help since we must support older android versions.
deprecated_re = re.compile(
r'(Note: .* uses? or overrides? a deprecated API.)$')
unchecked_re = re.compile(
r'(Note: .* uses? unchecked or unsafe operations.)$')
recompile_re = re.compile(r'(Note: Recompile with -Xlint:.* for details.)$')
activity_re = re.compile(r'^(?P<prefix>\s*location: )class Activity$')
def ApplyFilters(line):
return not (deprecated_re.match(line) or unchecked_re.match(line)
or recompile_re.match(line))
def Elaborate(line):
if activity_re.match(line):
prefix = ' ' * activity_re.match(line).end('prefix')
return '{}\n{}Expecting a FragmentActivity? See {}'.format(
line, prefix, 'docs/ui/android/bytecode_rewriting.md')
return line
output = build_utils.FilterReflectiveAccessJavaWarnings(output)
lines = (l for l in output.split('\n') if ApplyFilters(l))
lines = (Elaborate(l) for l in lines)
output_processor = javac_output_processor.JavacOutputProcessor(target_name)
lines = output_processor.Process(lines)
return '\n'.join(lines)
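# Illustrative example (not part of the original script) of what the filters
# above do to javac output:
#   'Note: Foo.java uses or overrides a deprecated API.'    -> line dropped
#   'Note: Recompile with -Xlint:deprecation for details.'  -> line dropped
#   '    location: class Activity'  -> kept, with a follow-up line appended
#       suggesting a FragmentActivity and pointing at
#       docs/ui/android/bytecode_rewriting.md
# (Foo.java is a hypothetical file name used only for this example.)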
def _ParsePackageAndClassNames(java_file):
package_name = ''
class_names = []
with open(java_file) as f:
for l in f:
# Strip unindented comments.
# Considers a leading * as a continuation of a multi-line comment (our
# linter doesn't enforce a space before it like there should be).
l = re.sub(r'^(?://.*|/?\*.*?(?:\*/\s*|$))', '', l)
# Strip anything between double quotes (string literals), so the word "class"
# appearing inside a string does not trigger a match. This isn't strictly
# correct (escaped quotes break it) but covers the vast majority of cases.
l = re.sub('(?:".*?")', '', l)
m = re.match(r'package\s+(.*?);', l)
if m and not package_name:
package_name = m.group(1)
# Not exactly a proper parser, but works for sources that Chrome uses.
# In order to not match nested classes, it just checks for lack of indent.
m = re.match(r'(?:\S.*?)?(?:class|@?interface|enum)\s+(.+?)\b', l)
if m:
class_names.append(m.group(1))
return package_name, class_names
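# Illustrative example (hypothetical source file, not original code): for a
# .java file containing
#   package org.chromium.foo;
#   public class Bar {
#     class Inner {}    <- indented, so deliberately not matched
#   }
# this helper returns ('org.chromium.foo', ['Bar']).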
def _ProcessJavaFileForInfo(java_file):
package_name, class_names = _ParsePackageAndClassNames(java_file)
return java_file, package_name, class_names
class _InfoFileContext:
"""Manages the creation of the class->source file .info file."""
def __init__(self, chromium_code, excluded_globs):
self._chromium_code = chromium_code
self._excluded_globs = excluded_globs
# Map of .java path -> .srcjar/nested/path.java.
self._srcjar_files = {}
# List of generators from pool.imap_unordered().
self._results = []
# Lazily created multiprocessing.Pool.
self._pool = None
def AddSrcJarSources(self, srcjar_path, extracted_paths, parent_dir):
for path in extracted_paths:
# We want the path inside the srcjar so the viewer can have a tree
# structure.
self._srcjar_files[path] = '{}/{}'.format(
srcjar_path, os.path.relpath(path, parent_dir))
def SubmitFiles(self, java_files):
if self._pool is None:
# Restrict to just one worker process so that collecting info does not slow
# down compilation; the compile itself is always the slower step.
self._pool = multiprocessing.Pool(1)
logging.info('Submitting %d files for info', len(java_files))
self._results.append(
self._pool.imap_unordered(
_ProcessJavaFileForInfo, java_files, chunksize=1000))
def _CheckPathMatchesClassName(self, java_file, package_name, class_name):
parts = package_name.split('.') + [class_name + '.java']
expected_path_suffix = os.path.sep.join(parts)
if not java_file.endswith(expected_path_suffix):
raise Exception(('Java package+class name do not match its path.\n'
'Actual path: %s\nExpected path: %s') %
(java_file, expected_path_suffix))
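# Illustrative example (hypothetical paths, not original code): a file that
# declares "package org.chromium.foo;" and "class Bar" must live at a path
# ending in org/chromium/foo/Bar.java, e.g.
#   some/src/dir/org/chromium/foo/Bar.java  -> accepted
#   some/src/dir/org/chromium/baz/Bar.java  -> raises Exception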
def _ProcessInfo(self, java_file, package_name, class_names, source):
for class_name in class_names:
yield '{}.{}'.format(package_name, class_name)
# Skip aidl srcjars since they don't indent code correctly.
if '_aidl.srcjar' in source:
continue
assert not self._chromium_code or len(class_names) == 1, (
'Chromium java files must only have one class: {}'.format(source))
if self._chromium_code:
# This check is not necessary but nice to check this somewhere.
self._CheckPathMatchesClassName(java_file, package_name, class_names[0])
def _ShouldIncludeInJarInfo(self, fully_qualified_name):
name_as_class_glob = fully_qualified_name.replace('.', '/') + '.class'
return not build_utils.MatchesGlob(name_as_class_glob, self._excluded_globs)
def _Collect(self):
if self._pool is None:
return {}
ret = {}
for result in self._results:
for java_file, package_name, class_names in result:
source = self._srcjar_files.get(java_file, java_file)
for fully_qualified_name in self._ProcessInfo(java_file, package_name,
class_names, source):
if self._ShouldIncludeInJarInfo(fully_qualified_name):
ret[fully_qualified_name] = java_file
return ret
def Close(self):
# Workaround for a Python 2.x bug with multiprocessing and daemon threads:
# https://bugs.python.org/issue4106
if self._pool is not None:
logging.info('Joining multiprocessing.Pool')
self._pool.terminate()
self._pool.join()
logging.info('Done.')
def Commit(self, output_path):
"""Writes a .jar.info file.
Maps fully qualified names for classes to either the java file that they
are defined in or the path of the srcjar that they came from.
"""
logging.info('Collecting info file entries')
entries = self._Collect()
logging.info('Writing info file: %s', output_path)
with build_utils.AtomicOutput(output_path, mode='wb') as f:
jar_info_utils.WriteJarInfoFile(f, entries, self._srcjar_files)
logging.info('Completed info file: %s', output_path)
def _CreateJarFile(jar_path, service_provider_configuration_dir,
additional_jar_files, classes_dir):
logging.info('Start creating jar file: %s', jar_path)
with build_utils.AtomicOutput(jar_path) as f:
with zipfile.ZipFile(f.name, 'w') as z:
build_utils.ZipDir(z, classes_dir)
if service_provider_configuration_dir:
config_files = build_utils.FindInDirectory(
service_provider_configuration_dir)
for config_file in config_files:
zip_path = os.path.relpath(config_file,
service_provider_configuration_dir)
build_utils.AddToZipHermetic(z, zip_path, src_path=config_file)
if additional_jar_files:
for src_path, zip_path in additional_jar_files:
build_utils.AddToZipHermetic(z, zip_path, src_path=src_path)
logging.info('Completed jar file: %s', jar_path)
def _OnStaleMd5(changes, options, javac_cmd, javac_args, java_files):
logging.info('Starting _OnStaleMd5')
if options.enable_kythe_annotations:
# Kythe requires those env variables to be set and compile_java.py does the
# same
if not os.environ.get('KYTHE_ROOT_DIRECTORY') or \
not os.environ.get('KYTHE_OUTPUT_DIRECTORY'):
raise Exception('--enable-kythe-annotations requires '
'KYTHE_ROOT_DIRECTORY and KYTHE_OUTPUT_DIRECTORY '
'environment variables to be set.')
javac_extractor_cmd = build_utils.JavaCmd() + [
'-jar',
_JAVAC_EXTRACTOR,
]
try:
# _RunCompiler()'s partial javac implementation does not support
# generating outputs in $KYTHE_OUTPUT_DIRECTORY.
_RunCompiler(changes,
options,
javac_extractor_cmd + javac_args,
java_files,
options.jar_path + '.javac_extractor',
enable_partial_javac=False)
except build_utils.CalledProcessError as e:
# Having no index for a particular target is better than failing the entire
# codesearch build. Log an error and move on.
logging.error('Could not generate kzip: %s', e)
intermediates_out_dir = None
jar_info_path = None
if not options.enable_errorprone:
# Delete any stale files in the generated directory. options.generated_dir
# exists solely for codesearch.
shutil.rmtree(options.generated_dir, True)
intermediates_out_dir = options.generated_dir
jar_info_path = options.jar_path + '.info'
# Compiles with Error Prone take twice as long to run as pure javac. Thus GN
# rules run both in parallel, with Error Prone only used for checks.
_RunCompiler(changes,
options,
javac_cmd + javac_args,
java_files,
options.jar_path,
jar_info_path=jar_info_path,
intermediates_out_dir=intermediates_out_dir,
enable_partial_javac=True)
logging.info('Completed all steps in _OnStaleMd5')
def _RunCompiler(changes,
options,
javac_cmd,
java_files,
jar_path,
jar_info_path=None,
intermediates_out_dir=None,
enable_partial_javac=False):
"""Runs java compiler.
Args:
changes: md5_check.Changes object.
options: Object with command line flags.
javac_cmd: Command to execute.
java_files: List of java files passed from command line.
jar_path: Path of output jar file.
jar_info_path: Path of the .info file to generate.
If None, .info file will not be generated.
intermediates_out_dir: Directory for saving intermediate outputs.
If None a temporary directory is used.
enable_partial_javac: Enables compiling only Java files which have changed
in the special case that no method signatures have changed. This is
useful for large GN targets.
Not supported if compiling generates outputs other than |jar_path| and
|jar_info_path|.
"""
logging.info('Starting _RunCompiler')
java_files = java_files.copy()
java_srcjars = options.java_srcjars
save_info_file = jar_info_path is not None
# Use jar_path's directory to ensure paths are relative (needed for goma).
temp_dir = jar_path + '.staging'
shutil.rmtree(temp_dir, True)
os.makedirs(temp_dir)
info_file_context = None
try:
classes_dir = os.path.join(temp_dir, 'classes')
service_provider_configuration = os.path.join(
temp_dir, 'service_provider_configuration')
if java_files:
os.makedirs(classes_dir)
if enable_partial_javac:
all_changed_paths_are_java = all(
p.endswith(".java") for p in changes.IterChangedPaths())
if (all_changed_paths_are_java and not changes.HasStringChanges()
and os.path.exists(jar_path)
and (jar_info_path is None or os.path.exists(jar_info_path))):
# Log message is used by tests to determine whether partial javac
# optimization was used.
logging.info('Using partial javac optimization for %s compile' %
(jar_path))
# Header jar corresponding to |java_files| did not change.
# As a build speed optimization (crbug.com/1170778), re-compile only
# java files which have changed. Re-use old jar .info file.
java_files = list(changes.IterChangedPaths())
java_srcjars = None
# Reuse old .info file.
save_info_file = False
build_utils.ExtractAll(jar_path, classes_dir)
if save_info_file:
info_file_context = _InfoFileContext(options.chromium_code,
options.jar_info_exclude_globs)
if intermediates_out_dir is None:
input_srcjars_dir = os.path.join(temp_dir, 'input_srcjars')
else:
input_srcjars_dir = os.path.join(intermediates_out_dir, 'input_srcjars')
if java_srcjars:
logging.info('Extracting srcjars to %s', input_srcjars_dir)
build_utils.MakeDirectory(input_srcjars_dir)
for srcjar in options.java_srcjars:
extracted_files = build_utils.ExtractAll(
srcjar, no_clobber=True, path=input_srcjars_dir, pattern='*.java')
java_files.extend(extracted_files)
if save_info_file:
info_file_context.AddSrcJarSources(srcjar, extracted_files,
input_srcjars_dir)
logging.info('Done extracting srcjars')
if options.header_jar:
logging.info('Extracting service provider configs')
# Extract META-INF/services/* so that it can be copied into the output
# .jar
build_utils.ExtractAll(options.header_jar,
no_clobber=True,
path=service_provider_configuration,
pattern='META-INF/services/*')
logging.info('Done extracting service provider configs')
if save_info_file and java_files:
info_file_context.SubmitFiles(java_files)
if java_files:
# Don't include the output directory in the initial set of args since it
# being in a temp dir makes it unstable (breaks md5 stamping).
cmd = list(javac_cmd)
cmd += ['-d', classes_dir]
if options.classpath:
cmd += ['-classpath', ':'.join(options.classpath)]
# Pass source paths as response files to avoid extremely long command
# lines that are tedious to debug.
java_files_rsp_path = os.path.join(temp_dir, 'files_list.txt')
with open(java_files_rsp_path, 'w') as f:
f.write(' '.join(java_files))
cmd += ['@' + java_files_rsp_path]
process_javac_output_partial = functools.partial(
ProcessJavacOutput, target_name=options.target_name)
logging.debug('Build command %s', cmd)
start = time.time()
build_utils.CheckOutput(cmd,
print_stdout=options.chromium_code,
stdout_filter=process_javac_output_partial,
stderr_filter=process_javac_output_partial,
fail_on_output=options.warnings_as_errors)
end = time.time() - start
logging.info('Java compilation took %ss', end)
_CreateJarFile(jar_path, service_provider_configuration,
options.additional_jar_files, classes_dir)
if save_info_file:
info_file_context.Commit(jar_info_path)
logging.info('Completed all steps in _RunCompiler')
finally:
if info_file_context:
info_file_context.Close()
shutil.rmtree(temp_dir)
def _ParseOptions(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--target-name', help='Fully qualified GN target name.')
parser.add_option('--skip-build-server',
action='store_true',
help='Avoid using the build server.')
parser.add_option('--use-build-server',
action='store_true',
help='Always use the build server.')
parser.add_option(
'--java-srcjars',
action='append',
default=[],
help='List of srcjars to include in compilation.')
parser.add_option(
'--generated-dir',
help='Subdirectory within target_gen_dir to place extracted srcjars and '
'annotation processor output for codesearch to find.')
parser.add_option(
'--bootclasspath',
action='append',
default=[],
help='Boot classpath for javac. If this is specified multiple times, '
'they will all be appended to construct the classpath.')
parser.add_option(
'--java-version',
help='Java language version to use in -source and -target args to javac.')
parser.add_option('--classpath', action='append', help='Classpath to use.')
parser.add_option(
'--processorpath',
action='append',
help='GN list of jars that comprise the classpath used for Annotation '
'Processors.')
parser.add_option(
'--processor-arg',
dest='processor_args',
action='append',
help='key=value arguments for the annotation processors.')
parser.add_option(
'--additional-jar-file',
dest='additional_jar_files',
action='append',
help='Additional files to package into jar. By default, only Java .class '
'files are packaged into the jar. Files should be specified in '
'format <filename>:<path to be placed in jar>.')
parser.add_option(
'--jar-info-exclude-globs',
help='GN list of exclude globs to filter from generated .info files.')
parser.add_option(
'--chromium-code',
type='int',
help='Whether code being compiled should be built with stricter '
'warnings for chromium code.')
parser.add_option(
'--gomacc-path', help='When set, prefix javac command with gomacc')
parser.add_option(
'--errorprone-path', help='Use the Errorprone compiler at this path.')
parser.add_option(
'--enable-errorprone',
action='store_true',
help='Enable errorprone checks')
parser.add_option(
'--warnings-as-errors',
action='store_true',
help='Treat all warnings as errors.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option(
'--javac-arg',
action='append',
default=[],
help='Additional arguments to pass to javac.')
parser.add_option(
'--enable-kythe-annotations',
action='store_true',
help='Enable generation of Kythe kzip, used for codesearch. Ensure '
'proper environment variables are set before using this flag.')
parser.add_option(
'--header-jar',
help='This is the header jar for the current target that contains '
'META-INF/services/* files to be included in the output jar.')
options, args = parser.parse_args(argv)
build_utils.CheckOptions(options, parser, required=('jar_path', ))
options.bootclasspath = build_utils.ParseGnList(options.bootclasspath)
options.classpath = build_utils.ParseGnList(options.classpath)
options.processorpath = build_utils.ParseGnList(options.processorpath)
options.java_srcjars = build_utils.ParseGnList(options.java_srcjars)
options.jar_info_exclude_globs = build_utils.ParseGnList(
options.jar_info_exclude_globs)
additional_jar_files = []
for arg in options.additional_jar_files or []:
filepath, jar_filepath = arg.split(':')
additional_jar_files.append((filepath, jar_filepath))
options.additional_jar_files = additional_jar_files
java_files = []
for arg in args:
# Interpret a path prefixed with @ as a file containing a list of sources.
if arg.startswith('@'):
java_files.extend(build_utils.ReadSourcesList(arg[1:]))
else:
java_files.append(arg)
return options, java_files
def main(argv):
build_utils.InitLogging('JAVAC_DEBUG')
argv = build_utils.ExpandFileArgs(argv)
options, java_files = _ParseOptions(argv)
# Only use the build server for errorprone runs.
if (options.enable_errorprone and not options.skip_build_server
and server_utils.MaybeRunCommand(name=options.target_name,
argv=sys.argv,
stamp_file=options.jar_path,
force=options.use_build_server)):
return
javac_cmd = []
if options.gomacc_path:
javac_cmd.append(options.gomacc_path)
javac_cmd.append(build_utils.JAVAC_PATH)
javac_args = [
'-g',
# Chromium only allows UTF8 source files. Being explicit avoids
# javac pulling a default encoding from the user's environment.
'-encoding',
'UTF-8',
# Prevent compiler from compiling .java files not listed as inputs.
# See: http://blog.ltgt.net/most-build-tools-misuse-javac/
'-sourcepath',
':',
]
if options.enable_errorprone:
# All errorprone args are passed space-separated in a single arg.
errorprone_flags = ['-Xplugin:ErrorProne']
# Make everything a warning so that when treat_warnings_as_errors is false,
# they do not fail the build.
errorprone_flags += ['-XepAllErrorsAsWarnings']
# Don't check generated files.
errorprone_flags += ['-XepDisableWarningsInGeneratedCode']
errorprone_flags.extend('-Xep:{}:OFF'.format(x)
for x in ERRORPRONE_WARNINGS_TO_DISABLE)
errorprone_flags.extend('-Xep:{}:WARN'.format(x)
for x in ERRORPRONE_WARNINGS_TO_ENABLE)
if ERRORPRONE_CHECKS_TO_APPLY:
errorprone_flags += [
'-XepPatchLocation:IN_PLACE',
'-XepPatchChecks:,' + ','.join(ERRORPRONE_CHECKS_TO_APPLY)
]
javac_args += ['-XDcompilePolicy=simple', ' '.join(errorprone_flags)]
# This flag makes errorprone quit after its checks and before code generation.
# Since we do not need errorprone's outputs, this speeds up errorprone by
# 4 seconds for chrome_java.
if not ERRORPRONE_CHECKS_TO_APPLY:
javac_args += ['-XDshould-stop.ifNoError=FLOW']
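# Illustrative sketch (not part of the original script) of what this block
# produces: javac_args gains '-XDcompilePolicy=simple' plus one space-joined
# plugin string, roughly
#   '-Xplugin:ErrorProne -XepAllErrorsAsWarnings
#    -XepDisableWarningsInGeneratedCode -Xep:InlineMeInliner:OFF ...
#    -Xep:BinderIdentityRestoredDangerously:WARN ...'
# and, since ERRORPRONE_CHECKS_TO_APPLY is empty by default,
# '-XDshould-stop.ifNoError=FLOW' as well.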
if options.java_version:
javac_args.extend([
'-source',
options.java_version,
'-target',
options.java_version,
])
if options.java_version == '1.8':
# Android's boot jar doesn't contain all java 8 classes.
options.bootclasspath.append(build_utils.RT_JAR_PATH)
# This effectively disables all annotation processors, even including
# annotation processors in service provider configuration files named
# META-INF/. See the following link for reference:
# https://docs.oracle.com/en/java/javase/11/tools/javac.html
javac_args.extend(['-proc:none'])
if options.bootclasspath:
javac_args.extend(['-bootclasspath', ':'.join(options.bootclasspath)])
if options.processorpath:
javac_args.extend(['-processorpath', ':'.join(options.processorpath)])
if options.processor_args:
for arg in options.processor_args:
javac_args.extend(['-A%s' % arg])
javac_args.extend(options.javac_arg)
classpath_inputs = (
options.bootclasspath + options.classpath + options.processorpath)
depfile_deps = classpath_inputs
# Files that are already inputs in GN should go in input_paths.
input_paths = depfile_deps + options.java_srcjars + java_files
if options.header_jar:
input_paths.append(options.header_jar)
input_paths += [x[0] for x in options.additional_jar_files]
output_paths = [options.jar_path]
if not options.enable_errorprone:
output_paths += [options.jar_path + '.info']
input_strings = javac_cmd + javac_args + options.classpath + java_files + [
options.warnings_as_errors, options.jar_info_exclude_globs
]
# Use md5_check for |pass_changes| feature.
md5_check.CallAndWriteDepfileIfStale(lambda changes: _OnStaleMd5(
changes, options, javac_cmd, javac_args, java_files),
options,
depfile_deps=depfile_deps,
input_paths=input_paths,
input_strings=input_strings,
output_paths=output_paths,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Useful functions used by the rest of paramiko.
"""
from __future__ import generators
import array
from binascii import hexlify, unhexlify
import errno
import sys
import struct
import traceback
import threading
from paramiko.common import *
from paramiko.config import SSHConfig
# Change by RogerB - python < 2.3 doesn't have enumerate so we implement it
if sys.version_info < (2,3):
class enumerate:
def __init__ (self, sequence):
self.sequence = sequence
def __iter__ (self):
count = 0
for item in self.sequence:
yield (count, item)
count += 1
def inflate_long(s, always_positive=False):
"turns a normalized byte string into a long-int (adapted from Crypto.Util.number)"
out = 0L
negative = 0
if not always_positive and (len(s) > 0) and (ord(s[0]) >= 0x80):
negative = 1
if len(s) % 4:
filler = '\x00'
if negative:
filler = '\xff'
s = filler * (4 - len(s) % 4) + s
for i in range(0, len(s), 4):
out = (out << 32) + struct.unpack('>I', s[i:i+4])[0]
if negative:
out -= (1L << (8 * len(s)))
return out
def deflate_long(n, add_sign_padding=True):
"turns a long-int into a normalized byte string (adapted from Crypto.Util.number)"
# after much testing, this algorithm was deemed to be the fastest
s = ''
n = long(n)
while (n != 0) and (n != -1):
s = struct.pack('>I', n & 0xffffffffL) + s
n = n >> 32
# strip off leading zeros, FFs
for i in enumerate(s):
if (n == 0) and (i[1] != '\000'):
break
if (n == -1) and (i[1] != '\xff'):
break
else:
# degenerate case, n was either 0 or -1
i = (0,)
if n == 0:
s = '\000'
else:
s = '\xff'
s = s[i[0]:]
if add_sign_padding:
if (n == 0) and (ord(s[0]) >= 0x80):
s = '\x00' + s
if (n == -1) and (ord(s[0]) < 0x80):
s = '\xff' + s
return s
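# Illustrative round trip (a sketch, not part of the original module):
#   deflate_long(0x1234) == '\x12\x34'
#   inflate_long('\x12\x34') == 0x1234
#   inflate_long('\xff\xff') == -1    # high bit set means negative
#   deflate_long(0) == '\x00'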
def format_binary_weird(data):
out = ''
for i in enumerate(data):
out += '%02X' % ord(i[1])
if i[0] % 2:
out += ' '
if i[0] % 16 == 15:
out += '\n'
return out
def format_binary(data, prefix=''):
x = 0
out = []
while len(data) > x + 16:
out.append(format_binary_line(data[x:x+16]))
x += 16
if x < len(data):
out.append(format_binary_line(data[x:]))
return [prefix + x for x in out]
def format_binary_line(data):
left = ' '.join(['%02X' % ord(c) for c in data])
right = ''.join([('.%c..' % c)[(ord(c)+63)//95] for c in data])
return '%-50s %s' % (left, right)
def hexify(s):
return hexlify(s).upper()
def unhexify(s):
return unhexlify(s)
def safe_string(s):
out = ''
for c in s:
if (ord(c) >= 32) and (ord(c) <= 127):
out += c
else:
out += '%%%02X' % ord(c)
return out
# ''.join([['%%%02X' % ord(c), c][(ord(c) >= 32) and (ord(c) <= 127)] for c in s])
def bit_length(n):
norm = deflate_long(n, 0)
hbyte = ord(norm[0])
if hbyte == 0:
return 1
bitlen = len(norm) * 8
while not (hbyte & 0x80):
hbyte <<= 1
bitlen -= 1
return bitlen
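# Illustrative values (a sketch, not part of the original module):
#   bit_length(0) == 1       # deflate_long(0, 0) is '\x00', special-cased
#   bit_length(5) == 3       # 0b101
#   bit_length(0x80) == 8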
def tb_strings():
return ''.join(traceback.format_exception(*sys.exc_info())).split('\n')
def generate_key_bytes(hashclass, salt, key, nbytes):
"""
Given a password, passphrase, or other human-source key, scramble it
through a secure hash into some keyworthy bytes. This specific algorithm
is used for encrypting/decrypting private key files.
@param hashclass: class from L{Crypto.Hash} that can be used as a secure
hashing function (like C{MD5} or C{SHA}).
@type hashclass: L{Crypto.Hash}
@param salt: data to salt the hash with.
@type salt: string
@param key: human-entered password or passphrase.
@type key: string
@param nbytes: number of bytes to generate.
@type nbytes: int
@return: key data
@rtype: string
"""
keydata = ''
digest = ''
if len(salt) > 8:
salt = salt[:8]
while nbytes > 0:
hash_obj = hashclass.new()
if len(digest) > 0:
hash_obj.update(digest)
hash_obj.update(key)
hash_obj.update(salt)
digest = hash_obj.digest()
size = min(nbytes, len(digest))
keydata += digest[:size]
nbytes -= size
return keydata
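# Illustrative usage (a sketch under this module's PyCrypto-era assumptions,
# not part of the original code):
#   from Crypto.Hash import MD5
#   key = generate_key_bytes(MD5, '\x01\x02\x03\x04', 'my passphrase', 32)
# This repeatedly hashes previous_digest + passphrase + salt (salt truncated
# to 8 bytes) and concatenates digests until 32 bytes have been produced.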
def load_host_keys(filename):
"""
Read a file of known SSH host keys, in the format used by openssh, and
return a compound dict of C{hostname -> keytype ->} L{PKey <paramiko.pkey.PKey>}.
The hostname may be an IP address or DNS name. The keytype will be either
C{"ssh-rsa"} or C{"ssh-dss"}.
This type of file unfortunately doesn't exist on Windows, but on posix,
it will usually be stored in C{os.path.expanduser("~/.ssh/known_hosts")}.
Since 1.5.3, this is just a wrapper around L{HostKeys}.
@param filename: name of the file to read host keys from
@type filename: str
@return: dict of host keys, indexed by hostname and then keytype
@rtype: dict(hostname, dict(keytype, L{PKey <paramiko.pkey.PKey>}))
"""
from paramiko.hostkeys import HostKeys
return HostKeys(filename)
def parse_ssh_config(file_obj):
"""
Provided only as a backward-compatible wrapper around L{SSHConfig}.
"""
config = SSHConfig()
config.parse(file_obj)
return config
def lookup_ssh_host_config(hostname, config):
"""
Provided only as a backward-compatible wrapper around L{SSHConfig}.
"""
return config.lookup(hostname)
def mod_inverse(x, m):
# it's crazy how small python can make this function.
u1, u2, u3 = 1, 0, m
v1, v2, v3 = 0, 1, x
while v3 > 0:
q = u3 // v3
u1, v1 = v1, u1 - v1 * q
u2, v2 = v2, u2 - v2 * q
u3, v3 = v3, u3 - v3 * q
if u2 < 0:
u2 += m
return u2
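# Illustrative example (a sketch, not part of the original module):
#   mod_inverse(3, 7) == 5    # since (3 * 5) % 7 == 1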
_g_thread_ids = {}
_g_thread_counter = 0
_g_thread_lock = threading.Lock()
def get_thread_id():
global _g_thread_ids, _g_thread_counter, _g_thread_lock
tid = id(threading.currentThread())
try:
return _g_thread_ids[tid]
except KeyError:
_g_thread_lock.acquire()
try:
_g_thread_counter += 1
ret = _g_thread_ids[tid] = _g_thread_counter
finally:
_g_thread_lock.release()
return ret
def log_to_file(filename, level=DEBUG):
"send paramiko logs to a logfile, if they're not already going somewhere"
l = logging.getLogger("paramiko")
if len(l.handlers) > 0:
return
l.setLevel(level)
f = open(filename, 'w')
lh = logging.StreamHandler(f)
lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s',
'%Y%m%d-%H:%M:%S'))
l.addHandler(lh)
# make only one filter object, so it doesn't get applied more than once
class PFilter (object):
def filter(self, record):
record._threadid = get_thread_id()
return True
_pfilter = PFilter()
def get_logger(name):
l = logging.getLogger(name)
l.addFilter(_pfilter)
return l
def retry_on_signal(function):
"""Retries function until it doesn't raise an EINTR error"""
while True:
try:
return function()
except EnvironmentError, e:
if e.errno != errno.EINTR:
raise
class Counter (object):
"""Stateful counter for CTR mode crypto"""
def __init__(self, nbits, initial_value=1L, overflow=0L):
self.blocksize = nbits / 8
self.overflow = overflow
# start with value - 1 so we don't have to store intermediate values when counting
# could the iv be 0?
if initial_value == 0:
self.value = array.array('c', '\xFF' * self.blocksize)
else:
x = deflate_long(initial_value - 1, add_sign_padding=False)
self.value = array.array('c', '\x00' * (self.blocksize - len(x)) + x)
def __call__(self):
"""Increament the counter and return the new value"""
i = self.blocksize - 1
while i > -1:
c = self.value[i] = chr((ord(self.value[i]) + 1) % 256)
if c != '\x00':
return self.value.tostring()
i -= 1
# counter reset
x = deflate_long(self.overflow, add_sign_padding=False)
self.value = array.array('c', '\x00' * (self.blocksize - len(x)) + x)
return self.value.tostring()
def new(cls, nbits, initial_value=1L, overflow=0L):
return cls(nbits, initial_value=initial_value, overflow=overflow)
new = classmethod(new)
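# Illustrative usage (a sketch, not part of the original module):
#   ctr = Counter.new(128, initial_value=1)
#   ctr()  # -> 16 bytes ending in '\x01' (stored value starts at 0 and is
#          #    incremented before being returned)
#   ctr()  # -> 16 bytes ending in '\x02'
# Each call yields the next big-endian counter block, as used for CTR-mode
# ciphers.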
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import numpy as np
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
_ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
class SomeRandomBenchmark(test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""
def _dontRunThisBenchmark(self):
_ran_somebenchmark_but_shouldnt[0] = True
def notBenchmarkMethod(self):
_ran_somebenchmark_but_shouldnt[0] = True
def benchmark1(self):
_ran_somebenchmark_1[0] = True
def benchmark2(self):
_ran_somebenchmark_2[0] = True
class TestReportingBenchmark(test.Benchmark):
"""This benchmark (maybe) reports some stuff."""
def benchmarkReport1(self):
self.report_benchmark(iters=1)
def benchmarkReport2(self):
self.report_benchmark(
iters=2,
name="custom_benchmark_name",
extras={"number_key": 3,
"other_key": "string"})
def benchmark_times_an_op(self):
input_size = 5
with session.Session(config=benchmark.benchmark_config()) as sess:
a = array_ops.placeholder(dtype=dtypes.float32, shape=(input_size))
a_plus_a = a + a
return self.run_op_benchmark(
sess,
a_plus_a,
feed_dict={a: np.arange(input_size)},
min_iters=1000,
store_trace=True,
name="op_benchmark")
class BenchmarkTest(test.TestCase):
def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
self.assertEqual(len(registry), 2)
self.assertTrue(SomeRandomBenchmark in registry)
self.assertTrue(TestReportingBenchmark in registry)
def testRunSomeRandomBenchmark(self):
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run other benchmarks, but this won't run the one we care about
benchmark._run_benchmarks("unrelated")
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run all the benchmarks, avoid generating any reports
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom")
# Validate that SomeRandomBenchmark ran correctly
self.assertTrue(_ran_somebenchmark_1[0])
self.assertTrue(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
_ran_somebenchmark_1[0] = False
_ran_somebenchmark_2[0] = False
_ran_somebenchmark_but_shouldnt[0] = False
# Test running a specific method of SomeRandomBenchmark
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom.*1$")
self.assertTrue(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
@test_util.disable_xla("b/123744455") # GPU memory is incorrect
def testReportingBenchmark(self):
tempdir = test.get_temp_dir()
try:
gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e
prefix = os.path.join(tempdir,
"reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (prefix,
"TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
expected_output_file_3 = "%s%s" % (prefix,
"TestReportingBenchmark.op_benchmark")
try:
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark with env, should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should write
reporting.benchmarkReport2() # This should write
benchmark_values3 = reporting.benchmark_times_an_op() # This should write
# Check the files were written
self.assertTrue(gfile.Exists(expected_output_file))
self.assertTrue(gfile.Exists(expected_output_file_2))
self.assertTrue(gfile.Exists(expected_output_file_3))
# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
expected_1.name = "TestReportingBenchmark.benchmarkReport1"
expected_1.iters = 1
expected_2 = test_log_pb2.BenchmarkEntry()
expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
expected_2.iters = 2
expected_2.extras["number_key"].double_value = 3
expected_2.extras["other_key"].string_value = "string"
expected_3 = test_log_pb2.BenchmarkEntry()
expected_3.name = "TestReportingBenchmark.op_benchmark"
expected_3.iters = 1000
def read_benchmark_entry(f):
s = gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEqual(1, len(entries.entry))
return entries.entry[0]
read_benchmark_1 = read_benchmark_entry(expected_output_file)
self.assertProtoEquals(expected_1, read_benchmark_1)
read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
self.assertProtoEquals(expected_2, read_benchmark_2)
read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
self.assertEqual(expected_3.name, read_benchmark_3.name)
self.assertEqual(expected_3.iters, read_benchmark_3.iters)
self.assertGreater(read_benchmark_3.wall_time, 0)
# Trace is not stored in benchmark entry. Instead we get it from
# return value of `run_op_benchmark` call.
full_trace = benchmark_values3["extras"]["full_trace_chrome_format"]
json_trace = json.loads(full_trace)
self.assertTrue(isinstance(json_trace, dict))
self.assertTrue("traceEvents" in json_trace.keys())
allocator_keys = [k for k in read_benchmark_3.extras.keys()
if k.startswith("allocator_maximum_num_bytes_")]
self.assertGreater(len(allocator_keys), 0)
for k in allocator_keys:
self.assertGreater(read_benchmark_3.extras[k].double_value, 0)
finally:
gfile.DeleteRecursively(tempdir)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the internal ops used by tfdbg v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class DebugIdentityV2OpTest(dumping_callback_test_lib.DumpingCallbackTestBase):
"""Tests for DebugIdentityV2Op: when DebugEventsWriter is initialized.
DebugEventsWriter being initialized prior to DebugIdentityV2 ops being invoked
for the first time is the typical case (e.g., tfdbg2 running on a local
machine with only local devices.)
"""
def setUp(self):
super(DebugIdentityV2OpTest, self).setUp()
# Testing using a small circular-buffer size.
self.circular_buffer_size = 4
self.tfdbg_run_id = "test_tfdbg_run"
self.writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, self.circular_buffer_size)
def tearDown(self):
self.writer.Close()
super(DebugIdentityV2OpTest, self).tearDown()
@test_util.run_in_graph_and_eager_modes
def testSingleTensorFullTensorDebugModeWithCircularBufferBehavior(self):
@def_function.function
def write_debug_trace(x):
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
op_name="Square",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
sqrt = math_ops.sqrt(x)
gen_debug_ops.debug_identity_v2(
sqrt,
tfdbg_context_id="beafdead",
op_name="Sqrt",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
return square + sqrt
x = np.array([3.0, 4.0])
# Only the graph-execution trace of the last iteration should be written
# to self.dump_root.
for _ in range(self.circular_buffer_size // 2 + 1):
self.assertAllClose(
write_debug_trace(x), [9.0 + np.sqrt(3.0), 16.0 + 2.0])
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
# Check that the .metadata DebugEvents data file has been created, even
# before FlushExecutionFiles() is called.
self.assertGreater(reader.starting_wall_time(), 0)
self.assertTrue(reader.tensorflow_version())
self.assertTrue(reader.tfdbg_file_version().startswith("debug.Event"))
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
# Before FlushExecutionFiles() is called, the .graph_execution_traces file
# ought to be empty.
with self.assertRaises(StopIteration):
next(graph_trace_iter)
# Flush the circular buffer.
self.writer.FlushExecutionFiles()
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
# The circular buffer has a size of 4. So only the data from the
# last two iterations should have been written to self.dump_root.
for _ in range(2):
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, 0)
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "Square")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [9.0, 16.0])
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, 0)
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "beafdead")
self.assertEqual(trace.op_name, "Sqrt")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [np.sqrt(3.0), 2.0])
# Only the graph-execution trace of the last iteration should be written
# to self.dump_root.
with self.assertRaises(StopIteration):
next(graph_trace_iter)
@test_util.run_in_graph_and_eager_modes
def testControlFlow(self):
@def_function.function
def collatz(x):
counter = constant_op.constant(0, dtype=dtypes.int32)
while math_ops.greater(x, 1):
counter = counter + 1
gen_debug_ops.debug_identity_v2(
x,
tfdbg_context_id="deadbeaf",
op_name="x",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
if math_ops.equal(x % 2, 0):
x = math_ops.div(x, 2)
else:
x = x * 3 + 1
return counter
x = constant_op.constant(10, dtype=dtypes.int32)
self.evaluate(collatz(x))
self.writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
try:
x_values = []
timestamp = 0
while True:
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, timestamp)
timestamp = debug_event.wall_time
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "x")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
x_values.append(int(tensor_util.MakeNdarray(trace.tensor_proto)))
except StopIteration:
pass
# Due to the circular buffer, only the last 4 iterations of
# [10, 5, 16, 8, 4, 2] should have been written.
self.assertAllEqual(x_values, [16, 8, 4, 2])
@test_util.run_in_graph_and_eager_modes
def testTwoDumpRoots(self):
another_dump_root = os.path.join(self.dump_root, "another")
another_debug_url = "file://%s" % another_dump_root
another_writer = debug_events_writer.DebugEventsWriter(
another_dump_root, "test_tfdbg_run")
@def_function.function
def write_debug_trace(x):
# DebugIdentityV2 is a stateful op. It ought to be included by auto
# control dependency.
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root, another_debug_url])
return square + 1.0
x = np.array([3.0, 4.0])
self.assertAllClose(write_debug_trace(x), np.array([10.0, 17.0]))
self.writer.FlushExecutionFiles()
another_writer.FlushExecutionFiles()
another_writer.Close()
for debug_root in (self.dump_root, another_dump_root):
with debug_events_reader.DebugEventsReader(debug_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
debug_event = next(graph_trace_iter).debug_event
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "")
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [9.0, 16.0])
with self.assertRaises(StopIteration):
next(graph_trace_iter)
class DebugIdentityV2OpUninitializedWriterTest(
dumping_callback_test_lib.DumpingCallbackTestBase):
"""Tests for DebugIdentityV2Op: when DebugEventsWriter is not initialized.
This case can occur when DebugIdentityV2Ops are running on a remote
TensorFlow server (e.g., a TPU worker).
"""
@test_util.run_in_graph_and_eager_modes
def testInvokingDebugIdentityV2OpBeforeCreatingDebugEventsWriterWorks(self):
circular_buffer_size = 3
@def_function.function
def write_debug_trace(x):
# DebugIdentityV2 is a stateful op. It ought to be included by auto
# control dependency.
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
op_name="Square",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root],
circular_buffer_size=circular_buffer_size)
return square
# The DebugIdentityV2 ops are invoked *before* a DebugEventsWriter at the
# same dump root is created.
for i in range(circular_buffer_size * 2):
self.assertAllClose(
write_debug_trace(np.array([i]).astype(np.float32)), [i**2.0])
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
"test_tfdbg_run",
circular_buffer_size)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
graph_execution_traces = []
while True:
try:
graph_execution_traces.append(
next(graph_trace_iter).debug_event.graph_execution_trace)
except StopIteration:
break
self.assertLen(graph_execution_traces, circular_buffer_size)
for i in range(circular_buffer_size):
self.assertAllClose(
tensor_util.MakeNdarray(graph_execution_traces[i].tensor_proto),
[(i + circular_buffer_size)**2.0])
class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpReduceInfNanThreeSlots(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)))
self.assertAllEqual(
debug_summary(constant_op.constant([])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(42.0)), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant([3.0, 4.0])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([3.0, -np.inf]))),
[-np.inf, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([[0, 0], [np.nan, 0]]))),
[0.0, 0.0, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]]))),
[0.0, np.inf, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]]))),
[-np.inf, np.inf, np.nan])
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, 0.0])
x[1, 41] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, np.nan])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpLargeTensorIDError(self):
modes = [
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.SHAPE,
]
# Maximum allowed tensor_id
tensor_id = np.power(2, 53, dtype=np.int64)
for mode in modes:
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
# Incrementing by one should error
tensor_id += 1
for mode in modes:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x[1, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[43, 99] = np.nan
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.zeros([100, 100, 50], dtype=np.float64)
x[0, 0, 1] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 1.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 2.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 2.0, 1.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 0.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 1.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 1.0, 1.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, :] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 10000.0, 0.0, 0.0, 100.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83:85] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 0.0])
x[1:9, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 8.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 9701, 0.0, 0.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
# Assert that the same op returns a consistent value
x = np.zeros([100, 100], dtype=np.float16)
x[3, 4] = -np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeEmpty(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant(0.0))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([3, 4], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 2.0, 12.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0])
x = np.ones([1, 2, 3, 4, 5, 6], dtype=np.float16)
x[0, 1, 2, 2, 2, 2] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor,
[tensor_id, 19, 6.0, 2 * 3 * 4 * 5 * 6, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x = np.zeros([2], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.ones([1, 2, 3, 4, 5, 6, 7], dtype=np.double)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [
tensor_id, 2.0, 7.0, 2 * 3 * 4 * 5 * 6 * 7, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0
])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
expected = [tensor_id, -1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
expected = [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
expected = [tensor_id, -1, 1, 1, 2, 0, 0, 0, 0, 0, 2]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3, -np.inf], dtype=np.float32)))
expected = [tensor_id, -1, 1, 1, 2, 1, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]], dtype=np.float64)))
expected = [tensor_id, -1, 2, 2, 4, 0, 0, 1, 0, 3, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, 0], [np.nan, np.inf]], dtype=np.float16)))
expected = [tensor_id, -1, 19, 2, 4, 0, 1, 1, 0, 2, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, np.inf], [np.nan, -np.inf]], dtype=np.float32)))
expected = [tensor_id, -1, 1, 2, 4, 1, 1, 1, 0, 1, 0]
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
def tensor_counts(arr):
counts = [len(np.shape(arr)), np.size(arr), 0, 0, 0, 0, 0, 0]
for n in np.ravel(arr):
if np.isneginf(n):
counts[2] += 1
elif np.isposinf(n):
counts[3] += 1
elif np.isnan(n):
counts[4] += 1
elif n < 0.:
counts[5] += 1
elif n == 0.:
counts[6] += 1
else:
counts[7] += 1
return counts
x = np.zeros([50, 50], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[40:50, 40:50] = 10
x[3, 20] = -10
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 19] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.ones([25, 25, 50], dtype=np.float32) * np.inf
x[:, :, 1] = np.nan
x[:, :, 2] = -np.inf
x[:, :, 3] = -1
x[:, :, 4] = 0
x[:, :, 5] = 1
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 1] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x[0, 0, 0] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [
tensor_id,
-1,
1,
] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 2] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
# Assert that the same op returns a consistent value
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[90:100, 90:100] = 10
x[3, 20] = -10
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.ones((100, 200, 3, 10), np.double)
x[1, 30, 2] = 10
x[5, :, 0, 1] = np.nan
x[90:100, 150, :, :] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
def testCheckNumericsV2OpNegativeAndPositiveInf(self):
"""Test that CheckNumericsV2 op distinguishes negative and positive infs."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf and \+Inf values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self):
"""CheckNumericsV2 op distinguishes - & + infs when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf, \+Inf, and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2PositiveInfAndNaN(self):
"""Test that CheckNumericsV2 op shows sign of inf when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([0.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had \+Inf and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
|
import unittest
import time
from requests.exceptions import RequestException
from testcases import WebserverTestCase
from locust.stats import RequestStats, StatsEntry, global_stats
from locust.core import HttpLocust, Locust, TaskSet, task
from locust.inspectlocust import get_task_ratio_dict
from locust.rpc.protocol import Message
class TestRequestStats(unittest.TestCase):
def setUp(self):
self.stats = RequestStats()
self.stats.start_time = time.time()
self.s = StatsEntry(self.stats, "test_entry", "GET")
self.s.log(45, 0)
self.s.log(135, 0)
self.s.log(44, 0)
self.s.log_error(Exception("dummy fail"))
self.s.log_error(Exception("dummy fail"))
self.s.log(375, 0)
self.s.log(601, 0)
self.s.log(35, 0)
self.s.log(79, 0)
self.s.log_error(Exception("dummy fail"))
def test_percentile(self):
s = StatsEntry(self.stats, "percentile_test", "GET")
for x in xrange(100):
s.log(x, 0)
self.assertEqual(s.get_response_time_percentile(0.5), 50)
self.assertEqual(s.get_response_time_percentile(0.6), 60)
self.assertEqual(s.get_response_time_percentile(0.95), 95)
def test_median(self):
self.assertEqual(self.s.median_response_time, 79)
def test_total_rps(self):
self.assertEqual(self.s.total_rps, 7)
def test_current_rps(self):
self.stats.last_request_timestamp = int(time.time()) + 4
self.assertEqual(self.s.current_rps, 3.5)
self.stats.last_request_timestamp = int(time.time()) + 25
self.assertEqual(self.s.current_rps, 0)
def test_num_reqs_fails(self):
self.assertEqual(self.s.num_requests, 7)
self.assertEqual(self.s.num_failures, 3)
def test_avg(self):
self.assertEqual(self.s.avg_response_time, 187.71428571428571428571428571429)
def test_reset(self):
self.s.reset()
self.s.log(756, 0)
self.s.log_error(Exception("dummy fail after reset"))
self.s.log(85, 0)
self.assertEqual(self.s.total_rps, 2)
self.assertEqual(self.s.num_requests, 2)
self.assertEqual(self.s.num_failures, 1)
self.assertEqual(self.s.avg_response_time, 420.5)
self.assertEqual(self.s.median_response_time, 85)
def test_reset_min_response_time(self):
self.s.reset()
self.s.log(756, 0)
self.assertEqual(756, self.s.min_response_time)
def test_aggregation(self):
s1 = StatsEntry(self.stats, "aggregate me!", "GET")
s1.log(12, 0)
s1.log(12, 0)
s1.log(38, 0)
s1.log_error("Dummy exzeption")
s2 = StatsEntry(self.stats, "aggregate me!", "GET")
s2.log_error("Dummy exzeption")
s2.log_error("Dummy exzeption")
s2.log(12, 0)
s2.log(99, 0)
s2.log(14, 0)
s2.log(55, 0)
s2.log(38, 0)
s2.log(55, 0)
s2.log(97, 0)
s = StatsEntry(self.stats, "GET", "")
s.extend(s1, full_request_history=True)
s.extend(s2, full_request_history=True)
self.assertEqual(s.num_requests, 10)
self.assertEqual(s.num_failures, 3)
self.assertEqual(s.median_response_time, 38)
self.assertEqual(s.avg_response_time, 43.2)
def test_serialize_through_message(self):
"""
Serialize a RequestStats instance, then serialize it through a Message,
and unserialize the whole thing again. This is done "IRL" when stats are sent
from slaves to master.
"""
s1 = StatsEntry(self.stats, "test", "GET")
s1.log(10, 0)
s1.log(20, 0)
s1.log(40, 0)
u1 = StatsEntry.unserialize(s1.serialize())
data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
u1 = StatsEntry.unserialize(data)
self.assertEqual(20, u1.median_response_time)
class TestRequestStatsWithWebserver(WebserverTestCase):
def test_request_stats_content_length(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
locust.client.get("/ultra_fast")
self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response"))
locust.client.get("/ultra_fast")
self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response"))
def test_request_stats_no_content_length(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
l = MyLocust()
path = "/no_content_length"
r = l.client.get(path)
self.assertEqual(global_stats.get(path, "GET").avg_content_length, len("This response does not have content-length in the header"))
def test_request_stats_no_content_length_streaming(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
l = MyLocust()
path = "/no_content_length"
r = l.client.get(path, stream=True)
self.assertEqual(0, global_stats.get(path, "GET").avg_content_length)
def test_request_stats_named_endpoint(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
locust.client.get("/ultra_fast", name="my_custom_name")
self.assertEqual(1, global_stats.get("my_custom_name", "GET").num_requests)
def test_request_stats_query_variables(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
locust.client.get("/ultra_fast?query=1")
self.assertEqual(1, global_stats.get("/ultra_fast?query=1", "GET").num_requests)
def test_request_connection_error(self):
class MyLocust(HttpLocust):
host = "http://localhost:1"
locust = MyLocust()
response = locust.client.get("/", timeout=0.1)
self.assertEqual(response.status_code, 0)
self.assertEqual(1, global_stats.get("/", "GET").num_failures)
self.assertEqual(0, global_stats.get("/", "GET").num_requests)
def test_max_requests(self):
class MyTaskSet(TaskSet):
@task
def my_task(self):
self.client.get("/ultra_fast")
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
task_set = MyTaskSet
min_wait = 1
max_wait = 1
try:
from locust.exception import StopLocust
global_stats.clear_all()
global_stats.max_requests = 2
l = MyLocust()
self.assertRaises(StopLocust, lambda: l.task_set(l).run())
self.assertEqual(2, global_stats.num_requests)
global_stats.clear_all()
global_stats.max_requests = 2
self.assertEqual(0, global_stats.num_requests)
l.run()
self.assertEqual(2, global_stats.num_requests)
finally:
global_stats.clear_all()
global_stats.max_requests = None
def test_max_requests_failed_requests(self):
class MyTaskSet(TaskSet):
@task
def my_task(self):
self.client.get("/ultra_fast")
self.client.get("/fail")
self.client.get("/fail")
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
task_set = MyTaskSet
min_wait = 1
max_wait = 1
try:
from locust.exception import StopLocust
global_stats.clear_all()
global_stats.max_requests = 3
l = MyLocust()
self.assertRaises(StopLocust, lambda: l.task_set(l).run())
self.assertEqual(1, global_stats.num_requests)
self.assertEqual(2, global_stats.num_failures)
global_stats.clear_all()
global_stats.max_requests = 2
self.assertEqual(0, global_stats.num_requests)
self.assertEqual(0, global_stats.num_failures)
l.run()
self.assertEqual(1, global_stats.num_requests)
self.assertEqual(1, global_stats.num_failures)
finally:
global_stats.clear_all()
global_stats.max_requests = None
class MyTaskSet(TaskSet):
@task(75)
def root_task(self):
pass
@task(25)
class MySubTaskSet(TaskSet):
@task
def task1(self):
pass
@task
def task2(self):
pass
class TestInspectLocust(unittest.TestCase):
def test_get_task_ratio_dict_relative(self):
ratio = get_task_ratio_dict([MyTaskSet])
self.assertEqual(1.0, ratio["MyTaskSet"]["ratio"])
self.assertEqual(0.75, ratio["MyTaskSet"]["tasks"]["root_task"]["ratio"])
self.assertEqual(0.25, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["ratio"])
self.assertEqual(0.5, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["tasks"]["task1"]["ratio"])
self.assertEqual(0.5, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["tasks"]["task2"]["ratio"])
def test_get_task_ratio_dict_total(self):
ratio = get_task_ratio_dict([MyTaskSet], total=True)
self.assertEqual(1.0, ratio["MyTaskSet"]["ratio"])
self.assertEqual(0.75, ratio["MyTaskSet"]["tasks"]["root_task"]["ratio"])
self.assertEqual(0.25, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["ratio"])
self.assertEqual(0.125, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["tasks"]["task1"]["ratio"])
self.assertEqual(0.125, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["tasks"]["task2"]["ratio"])
|
|
from gi.repository import Gtk, Gdk, GLib
import cairo
import math
import shm
import user_config as conf
import pickle
from shm import navigation_desires
from var_util import SmoothVar
from misc.log import with_logging
MAX_ZOOM = 8
MIN_ZOOM = 0.3
REFRESH = 1./25 #effectively sets the draw framerate
@with_logging
class Canvas(Gtk.DrawingArea):
def __init__ (self):
Gtk.DrawingArea.__init__(self)
self.connect('draw', self._do_expose)
self.set_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.connect('button-press-event', self.on_button_press)
self.follow = False
self.followHeading = False
self.killed = False
self.last_pos = [self.sub_pos()]
self.refresh_matrix()
self.smooth_zoom = None
self.smooth_rot = None
self.smooth_pan = None
self.refresh()
self.i = 0
def on_button_press(self, widget, event):
xx, yx, xy, yy, x0, y0 = self.grid_mat
x, y = event.x, event.y
dx, dy = x0 - x, y - y0 + 17 #Weird offset
dx_prime, dy_prime = xx*dx + xy*dy, yx*dx + yy*dy
navigation_desires.north.set(dy_prime/conf.MAGNIFY)
navigation_desires.east.set(dx_prime/conf.MAGNIFY)
def _do_expose(self, widget, cr):
''' This method is called internally, if you want to redraw the canvas,
call self.redraw()
Every draw, this method is called, consider it the starting point.
'''
allocation = self.get_allocation()
self.width = allocation.width
self.height = allocation.height
# This width / height is the current size of the canvas
# If the canvas is resized, these values will change (and
# this do_expose method will be triggered)
self.set_transform(cr)
self.draw_grid(cr)
self.draw_sub(cr)
# Disabled because the needed shm variables are now gone :(
#self.draw_tags(cr)
self.i += 1
def redraw(self):
self.queue_draw()
def refresh(self):
''' Run to periodically refresh the display '''
self.redraw()
if not self.killed:
GLib.timeout_add(int(REFRESH*1000), self.refresh)
def kill(self):
self.killed = True
def refresh_matrix(self):
self.scale = 1.0
self.grid_mat = cairo.Matrix()
self.pan_tup = (0,0)
def set_transform(self, cr):
''' Handles affine transformations for the canvas matrix
Shifts the display to the center of the display and adjusts for
any user pan
'''
xx, yx, xy, yy, x0, y0 = self.grid_mat
x0 = self.width/2 + self.pan_tup[0]
y0 = self.height/2 + self.pan_tup[1]
self.grid_mat = cairo.Matrix(xx, yx, xy, yy, x0, y0)
self.set_follow(cr)
def _ticker(self):
''' Ticks periodically while there exists some variable that needs
ticking '''
if self.smooth_zoom is not None:
if self.smooth_zoom.running():
self.smooth_zoom.tick()
self.zoom(None)
GLib.timeout_add(int(conf.PERIOD*1000), self._ticker)
else:
self.smooth_zoom = None
if self.smooth_rot is not None:
if self.smooth_rot.running():
self.smooth_rot.tick()
self.rotate(None)
GLib.timeout_add(int(conf.PERIOD*1000), self._ticker)
else:
self.smooth_rot = None
if self.smooth_pan is not None:
if self.smooth_pan.running():
self.smooth_pan.tick()
self.pan(None, None)
GLib.timeout_add(int(conf.PERIOD*1000), self._ticker)
else:
self.smooth_pan = None
def set_follow(self, cr):
''' Changes display matrix if sub is being followed '''
if self.follow:
sub_head = self.sub_head()
xx, yx, xy, yy, x0, y0 = self.grid_mat
rot_mat = cairo.Matrix(xx, yx, xy, yy, self.width/2, self.height/2)
if self.followHeading:
rot_mat.rotate(sub_head*math.pi/180.)
x,y = self.sub_pos()
rot_mat.translate(-x, -y)
cr.set_matrix(rot_mat)
else:
cr.set_matrix(self.grid_mat)
def pan(self, x, y):
''' Shift the screen by x, y '''
if self.follow:
return
if conf.SMOOTHING:
if x is None and y is None:
d = self.smooth_pan.value
(x, y) = self.smooth_pan.initial
else:
d = 1.
self.smooth_pan = SmoothVar(d/SmoothVar.steps, thresh=0.2/SmoothVar.steps, initial=(x,y))
self.smooth_pan.setDesire(0)
self.smooth_pan.setVelocity(0.02)
factor = self.smooth_pan.value
self._ticker()
else:
d = 1
self.pan_tup = (self.pan_tup[0]+x*d,self.pan_tup[1]+y*d)
self.redraw()
def center(self):
self.refresh_matrix()
self.redraw()
def zoom(self, factor):
if conf.SMOOTHING:
if factor is None:
factor = self.smooth_zoom.value
if self.smooth_zoom.value > 1 and self.scale > self.smooth_zoom.final \
or self.smooth_zoom.value < 1 and self.scale < self.smooth_zoom.final:
self.smooth_zoom = None
else:
self.smooth_zoom = SmoothVar(1-(1-factor)/SmoothVar.steps)
self.smooth_zoom.setDesire(1)
self.smooth_zoom.setFinal(self.scale*factor)
factor = self.smooth_zoom.value
self._ticker()
if not (self.scale > MAX_ZOOM and factor > 1 or \
self.scale < MIN_ZOOM and factor < 1):
self.grid_mat.scale(factor, factor)
self.scale *= factor
self.redraw()
else:
self.smooth_zoom = None
self.log.info("Restraint is a virtue.")
def rotate(self, factor):
if conf.SMOOTHING:
if factor is None:
factor = self.smooth_rot.value
else:
self.smooth_rot = SmoothVar(factor/SmoothVar.steps, thresh=0.1/SmoothVar.steps, initial=0)
self.smooth_rot.setDesire(0)
factor = self.smooth_rot.value
self._ticker()
if not self.follow:
self.grid_mat.rotate(factor)
self.redraw()
def follow_sub(self, followHeading):
self.followHeading = followHeading
self.follow = not self.follow
if not self.follow:
self.center()
self.redraw()
def reset_path(self):
self.last_pos = []
self.redraw()
def draw_grid(self, cr):
''' Draws many lines for the grid '''
cr.set_source_rgba(*conf.GRID_COLOR)
num = conf.GRID_LINES
space = conf.GRID_SPACE * conf.MAGNIFY
cr.set_line_width(2/self.scale)
for i in xrange(-num, num+1):
cr.move_to(i*space, -space*num)
cr.line_to(i*space, space*num)
for i in xrange(-num, num+1):
cr.move_to(-space*num, i*space)
cr.line_to(space*num, i*space)
cr.stroke()
cr.save()
def sub_pos(self):
''' Get sub's translated position, with respect to canvas
MAGNIFY scales the shm variables to reasonable pixel distances'''
x = conf.MAGNIFY*shm.kalman.north.get()
y = conf.MAGNIFY*shm.kalman.east.get()
return (-y, x)
def sub_head(self):
''' Get sub's translated heading '''
return 180-shm.kalman.heading.get()
def draw_sub(self, cr):
# TODO: show scale... meter/pixel ratio?
''' Draw the sub at position x, y, which come from shm
1. translates matrix to -x, -y
2. Rotates to match heading
3. Un-rotates, and translates back to 0,0 '''
# Draws path
if len(self.last_pos) > 0:
cr.push_group()
cr.move_to(*self.last_pos[0])
cr.set_line_width(2/self.scale)
for p in self.last_pos:
cr.line_to(*p)
if len(self.last_pos) > 1:
cr.line_to(*self.sub_pos())
cr.stroke()
path_pattern = cr.pop_group()
cr.set_source_rgba(*conf.PATH_COLOR)
cr.mask(path_pattern)
cr.set_source_rgba(*conf.SUB_COLOR)
# Draws the sub triangle figure
x, y = self.sub_pos()
cr.push_group()
cr.move_to(x, y)
cr.line_to(x-conf.SUB_SIZE/2, y-conf.SUB_SIZE)
cr.line_to(x, y+conf.SUB_SIZE)
cr.line_to(x+conf.SUB_SIZE/2, y-conf.SUB_SIZE)
cr.line_to(x-conf.SUB_SIZE/2, y-conf.SUB_SIZE)
cr.close_path()
cr.fill()
sub_pattern = cr.pop_group()
# Rotates/translates and plots the sub
angle = self.sub_head()*math.pi/180.
sub_mat = sub_pattern.get_matrix()
rot_mat = cairo.Matrix.init_rotate(angle + math.pi)
sub_mat = cairo.Matrix.multiply(rot_mat, sub_mat)
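# The rotation above is applied about the pattern origin; the (dx, dy)
# computed below appears to translate the pattern so that the net effect is a
# rotation centred on the sub's current position rather than on (0, 0).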
phi = -math.atan2(y, x)
theta = math.pi - angle
r = math.sqrt(x*x + y*y)
dx = r * math.cos(phi-theta) - x
dy = r * math.sin(-phi+theta) - y
sub_mat.translate(dx, dy)
xx, yx, xy, yy, x0, y0 = sub_mat
sub_mat.translate(0, 0)
sub_pattern.set_matrix(sub_mat)
cr.set_source_rgba(*conf.SUB_COLOR)
cr.mask(sub_pattern)
'''
# pngs??
sub_surface = cairo.ImageSurface.create_from_png('trogdor.png')
sub = cairo.SurfacePattern(sub_surface)
sub.set_matrix(sub_mat)
cr.mask(sub)
'''
self.add_point(self.sub_pos())
def add_point(self, p):
''' Adds a point to the path only if it is within the path resolution
Also trims the path if needed '''
if len(self.last_pos) == 0:
self.last_pos.append(p)
elif self.dist(self.last_pos[-1],p) > 1./conf.PATH_RES:
self.last_pos.append(p)
if conf.PATH_LEN >= 0 and len(self.last_pos) > conf.PATH_LEN:
del self.last_pos[0]
def dist(self, a,b):
return math.sqrt((a[0]-b[0])**2+(a[1]-b[1])**2)
def draw_tags(self, cr):
points_to_draw = []
# Add tag
points_to_draw.append(((shm.mission.last_forward.get(), shm.mission.last_sway.get()), conf.TAG_COLOR))
# Add layout points
arx = shm.layout.state.get()
if len(arx) > 0:
thg = pickle.loads(arx)
n_offset = thg[0]
e_offset = thg[1]
ar = thg[2]
npts = map(lambda x: x - n_offset, ar[0::2])
epts = map(lambda x: x - e_offset, ar[1::2])
pts = zip(npts, epts)
for pt in pts:
points_to_draw.append((pt, conf.ELEMENT_COLOR))
for pt, clr in points_to_draw:
cr.set_source_rgba(*clr)
y = pt[0] * conf.MAGNIFY #east
x = -pt[1] * conf.MAGNIFY #north
cr.rectangle(x-conf.TAG_SIZE/2.,y-conf.TAG_SIZE/2., conf.TAG_SIZE, conf.TAG_SIZE)
cr.fill()
|
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameter estimation by iterated filtering."""
import collections
import contextlib
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import invert
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import joint_distribution_named
from tensorflow_probability.python.distributions import joint_distribution_sequential
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.experimental.mcmc import infer_trajectories
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'geometric_cooling_schedule',
'IteratedFilter'
]
JAX_MODE = False # Overwritten by rewrite script.
NUMPY_MODE = False
# Utility to avoid breakage when passed-in structures are mutated externally.
_copy_structure = lambda struct: tf.nest.map_structure(lambda x: x, struct)
ParametersAndState = collections.namedtuple('ParametersAndState',
['unconstrained_parameters',
'state'])
def geometric_cooling_schedule(cooling_fraction_per_k_iterations, k=1.):
"""Defines a cooling schedule following a geometric sequence.
This returns a function `f` such that
```python
f(iteration) = cooling_fraction_per_k_iterations**(iteration / k)
```
Args:
cooling_fraction_per_k_iterations: float `Tensor` ratio by which the
original value should be scaled once `k` iterations have been completed.
k: int `Tensor` number of iterations used to define the schedule.
Returns:
f: Python `callable` representing the cooling schedule.
"""
cooling_fraction_per_k_iterations = tf.convert_to_tensor(
cooling_fraction_per_k_iterations,
dtype_hint=tf.float32,
name='cooling_fraction_per_k_iterations')
dtype = cooling_fraction_per_k_iterations.dtype
k = tf.cast(k, dtype=dtype, name='k')
def f(iteration):
iteration = tf.cast(iteration, dtype=dtype, name='iteration')
return cooling_fraction_per_k_iterations ** (iteration / k)
return f
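# Usage sketch (illustrative, not part of the module): with
# cooling_fraction_per_k_iterations=0.5 and k=10 the scale is halved every
# 10 iterations:
#   schedule = geometric_cooling_schedule(0.5, k=10)
#   schedule(0)    # => 1.0
#   schedule(10)   # => 0.5
#   schedule(25)   # => 0.5**2.5 ~= 0.177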
class DeterministicEmpirical(distribution.Distribution):
"""Dummy 'proposal' distribution that just returns samples we pass in."""
def __init__(self, values_with_sample_dim, batch_ndims=0, validate_args=False,
name=None):
"""Initializes an empirical distribution with a list of samples.
Args:
values_with_sample_dim: nested structure of `Tensor`s, each of shape
prefixed by `[num_samples, B1, ..., Bn]`, where `num_samples` as well as
`B1, ..., Bn` are batch dimensions shared across all `Tensor`s.
batch_ndims: optional scalar int `Tensor`, or structure matching
`values_with_sample_dim` of scalar int `Tensor`s, specifying the number
of batch dimensions. Used to determine the batch and event shapes of the
distribution.
Default value: `0`.
validate_args: Python `bool` indicating whether to perform runtime checks
that may have performance cost.
Default value: `False`.
name: Python `str` name for ops created by this distribution.
"""
parameters = dict(locals())
with tf.name_scope(name or 'DeterministicEmpirical') as name:
# Ensure we don't break if the passed-in structures are externally
# mutated.
values_with_sample_dim = _copy_structure(values_with_sample_dim)
batch_ndims = _copy_structure(batch_ndims)
# Prevent tf.Module from wrapping passed-in values, because the
# wrapper breaks JointDistributionNamed (and maybe other JDs). Instead, we
# save a separate ref to the input that is used only by tf.Module
# tracking.
self._values_for_tracking = values_with_sample_dim
self._values_with_sample_dim = self._no_dependency(values_with_sample_dim)
if not tf.nest.is_nested(batch_ndims):
batch_ndims = tf.nest.map_structure(
lambda _: batch_ndims, values_with_sample_dim)
self._batch_ndims = batch_ndims
self._max_num_samples = ps.reduce_min(
[ps.size0(x) for x in tf.nest.flatten(values_with_sample_dim)])
super(DeterministicEmpirical, self).__init__(
dtype=tf.nest.map_structure(
lambda x: x.dtype, self.values_with_sample_dim),
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=True,
name=name)
self._parameters = self._no_dependency(parameters)
@property
def batch_ndims(self):
return _copy_structure(self._batch_ndims)
@property
def max_num_samples(self):
return self._max_num_samples
@property
def values_with_sample_dim(self):
return _copy_structure(self._values_with_sample_dim)
def _event_shape(self):
return tf.nest.map_structure(
lambda x, nd: tf.TensorShape(x.shape[1 + nd:]),
self.values_with_sample_dim,
self.batch_ndims)
def _event_shape_tensor(self):
return tf.nest.map_structure(
lambda x, nd: tf.shape(x)[1 + nd:],
self.values_with_sample_dim,
self.batch_ndims)
def _batch_shape(self):
return tf.nest.map_structure(
lambda x, nd: tf.TensorShape(x.shape[1 : 1 + nd]),
self.values_with_sample_dim,
self.batch_ndims)
def _batch_shape_tensor(self):
return tf.nest.map_structure(
lambda x, nd: tf.shape(x)[1 : 1 + nd],
self.values_with_sample_dim,
self.batch_ndims)
# TODO(b/152797117): Override _sample_n, once it supports joint distributions.
def sample(self, sample_shape=(), seed=None, name=None):
with tf.name_scope(name or 'sample'):
# Grab the required number of values from the provided tensors.
sample_shape = dist_util.expand_to_vector(sample_shape)
n = ps.cast(ps.reduce_prod(sample_shape), dtype=tf.int32)
# Check that we're not trying to draw too many samples.
assertions = []
will_overflow_ = tf.get_static_value(n > self.max_num_samples)
if will_overflow_:
raise ValueError('Trying to draw {} samples from a '
'`DeterministicEmpirical` instance for which only {} '
'samples were provided.'.format(
tf.get_static_value(n),
tf.get_static_value(self.max_num_samples)))
elif (will_overflow_ is None # Couldn't determine statically.
and self.validate_args):
assertions.append(
tf.debugging.assert_less_equal(
n, self.max_num_samples, message='Number of samples to draw '
'from a `DeterministicEmpirical` instance must not exceed the '
'number provided at construction.'))
# Extract the appropriate number of sampled values.
with tf.control_dependencies(assertions):
sampled = tf.nest.map_structure(
lambda x: x[:n, ...], self.values_with_sample_dim)
# Reshape the values to the appropriate sample shape.
return tf.nest.map_structure(
lambda x: tf.reshape(x, # pylint: disable=g-long-lambda
ps.concat([ps.cast(sample_shape, tf.int32),
ps.cast(ps.shape(x)[1:], tf.int32)],
axis=0)),
sampled)
def _prob(self, x):
flat_values = tf.nest.flatten(self.values_with_sample_dim)
return tf.cast(
tf.reduce_all([
tf.equal(a, b[:ps.size0(a)])
for (a, b) in zip(tf.nest.flatten(x), flat_values)]),
dtype=flat_values[0].dtype)
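# Usage sketch (illustrative, the values are hypothetical): pre-drawn values
# can be wrapped so that downstream sampling code simply replays them.
#   values = tf.constant([[1.], [2.], [3.]])   # three samples of shape [1]
#   replay = DeterministicEmpirical(values)
#   replay.sample(2)   # first two provided values, shape [2, 1]
#   replay.sample(5)   # error: only three samples were provided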
def _maybe_build_joint_distribution(structure_of_distributions):
"""Turns a (potentially nested) structure of dists into a single dist."""
# Base case: if we already have a Distribution, return it.
if dist_util.is_distribution_instance(structure_of_distributions):
return structure_of_distributions
# Otherwise, recursively convert all interior nested structures into JDs.
outer_structure = tf.nest.map_structure(
_maybe_build_joint_distribution,
structure_of_distributions)
if (hasattr(outer_structure, '_asdict') or
isinstance(outer_structure, collections.abc.Mapping)):
return joint_distribution_named.JointDistributionNamed(outer_structure)
else:
return joint_distribution_sequential.JointDistributionSequential(
outer_structure)
def augment_transition_fn_with_parameters(parameter_prior,
parameterized_transition_fn,
parameter_constraining_bijector):
"""Wraps a transition fn on states to act on `ParametersAndState` tuples."""
def params_and_state_transition_fn(step,
params_and_state,
perturbation_scale,
**kwargs):
"""Transition function operating on a `ParamsAndState` namedtuple."""
    # Extract the state, to pass through to the transition fn.
unconstrained_params, state = params_and_state
if 'state_history' in kwargs:
kwargs['state_history'] = kwargs['state_history'].state
# Perturb each (unconstrained) parameter with normally-distributed noise.
if not tf.nest.is_nested(perturbation_scale):
perturbation_scale = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(perturbation_scale, # pylint: disable=g-long-lambda
name='perturbation_scale',
dtype=x.dtype),
unconstrained_params)
perturbed_unconstrained_parameter_dists = tf.nest.map_structure(
lambda x, p, s: independent.Independent( # pylint: disable=g-long-lambda
normal.Normal(loc=x, scale=p),
reinterpreted_batch_ndims=ps.rank_from_shape(s)),
unconstrained_params,
perturbation_scale,
parameter_prior.event_shape_tensor())
# For the joint transition, pass the perturbed parameters
# into the original transition fn (after pushing them into constrained
# space).
return joint_distribution_named.JointDistributionNamed(
ParametersAndState(
unconstrained_parameters=_maybe_build_joint_distribution(
perturbed_unconstrained_parameter_dists),
state=lambda unconstrained_parameters: ( # pylint: disable=g-long-lambda
parameterized_transition_fn(
step,
state,
parameters=parameter_constraining_bijector.forward(
unconstrained_parameters),
**kwargs))))
return params_and_state_transition_fn
def augment_observation_fn_with_parameters(parameterized_observation_fn,
parameter_constraining_bijector):
"""Augments an observation fn to take `ParametersAndState` namedtuples."""
def observation_from_params_and_state_fn(step,
params_and_state,
**kwargs):
# Extract the state, to pass through to the observation fn.
unconstrained_parameters, state = params_and_state
if 'state_history' in kwargs:
_, kwargs['state_history'] = kwargs['state_history']
return parameterized_observation_fn(
step,
state,
parameters=parameter_constraining_bijector.forward(
unconstrained_parameters),
**kwargs)
return observation_from_params_and_state_fn
def joint_prior_on_parameters_and_state(parameter_prior,
parameterized_initial_state_prior_fn,
parameter_constraining_bijector,
prior_is_constrained=True):
"""Constructs a joint dist. from p(parameters) and p(state | parameters)."""
if prior_is_constrained:
parameter_prior = transformed_distribution.TransformedDistribution(
parameter_prior,
invert.Invert(parameter_constraining_bijector),
name='unconstrained_parameter_prior')
return joint_distribution_named.JointDistributionNamed(
ParametersAndState(
unconstrained_parameters=parameter_prior,
state=lambda unconstrained_parameters: ( # pylint: disable=g-long-lambda
parameterized_initial_state_prior_fn(
parameter_constraining_bijector.forward(
unconstrained_parameters)))))
class IteratedFilter(object):
"""A model augmented with parameter perturbations for iterated filtering."""
def __init__(self,
parameter_prior,
parameterized_initial_state_prior_fn,
parameterized_transition_fn,
parameterized_observation_fn,
parameterized_initial_state_proposal_fn=None,
parameterized_proposal_fn=None,
parameter_constraining_bijector=None,
name=None):
"""Builds an iterated filter for parameter estimation in sequential models.
    Iterated filtering is a parameter estimation method in which parameters
    are included in an augmented state space, with dynamics that perturb the
    parameters at each step; a filtering algorithm such as particle filtering
    is then run several times with perturbations of decreasing size. This
    class implements the IF2 algorithm of
[Ionides et al., 2015][1], for which, under appropriate conditions
(including a uniform prior) the final parameter distribution approaches a
point mass at the maximum likelihood estimate. If a non-uniform prior is
provided, the final parameter distribution will (under appropriate
conditions) approach a point mass at the maximum a posteriori (MAP) value.
This class augments the state space of a sequential model to include
parameter perturbations, and provides utilities to run particle filtering
    on that augmented model. Alternatively, the augmented components may be passed
directly into a filtering algorithm of the user's choice.
Args:
parameter_prior: prior `tfd.Distribution` over parameters (may be a joint
distribution).
parameterized_initial_state_prior_fn: `callable` with signature
`initial_state_prior = parameterized_initial_state_prior_fn(parameters)`
where `parameters` has the form of a sample from `parameter_prior`,
and `initial_state_prior` is a distribution over the initial state.
parameterized_transition_fn: `callable` with signature
`next_state_dist = parameterized_transition_fn(
step, state, parameters, **kwargs)`.
parameterized_observation_fn: `callable` with signature
`observation_dist = parameterized_observation_fn(
step, state, parameters, **kwargs)`.
parameterized_initial_state_proposal_fn: optional `callable` with
signature `initial_state_proposal =
parameterized_initial_state_proposal_fn(parameters)` where `parameters`
has the form of a sample from `parameter_prior`, and
`initial_state_proposal` is a distribution over the initial state.
parameterized_proposal_fn: optional `callable` with signature
        `next_state_dist = parameterized_proposal_fn(
step, state, parameters, **kwargs)`.
Default value: `None`.
parameter_constraining_bijector: optional `tfb.Bijector` instance
such that `parameter_constraining_bijector.forward(x)` returns valid
parameters for any real-valued `x` of the same structure and shape
as `parameters`. If `None`, the default bijector of the provided
`parameter_prior` will be used.
Default value: `None`.
name: `str` name for ops constructed by this object.
        Default value: `IteratedFilter`.
#### Example
We'll walk through applying iterated filtering to a toy
Susceptible-Infected-Recovered (SIR) model, a [compartmental model](
https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SIR_model)
of infectious disease. Note that the model we use here is extremely
simplified and is intended as a pedagogical example; it should not be
interpreted to describe disease spread in the real world.
We begin by specifying a prior distribution over the parameters to be
inferred, thus defining the structure of the parameter space and the support
of the parameters (which will imply a default constraining bijector). Here
we'll use uniform priors over ranges that we expect to contain the
parameters:
```python
parameter_prior = tfd.JointDistributionNamed({
'infection_rate': tfd.Uniform(low=0., high=3.),
'recovery_rate': tfd.Uniform(low=0., high=3.),
})
```
The model specification itself is identical to that used by
`tfp.experimental.mcmc.infer_trajectories`, except that each component
accepts an additional `parameters` keyword argument. We start by specifying
a parameterized prior on initial states. In this case, our state
includes the current number of susceptible and infected individuals
(the third compartment, recovered individuals, is implicitly defined
to include the remaining population). We'll also include, as auxiliary
variables, the daily counts of new infections and new recoveries; these
will help ensure that people shift consistently across compartments.
```python
population_size = 1000
initial_state_prior_fn = lambda parameters: tfd.JointDistributionNamed({
'new_infections': tfd.Poisson(parameters['infection_rate']),
'new_recoveries': tfd.Deterministic(
tf.broadcast_to(0., tf.shape(parameters['recovery_rate']))),
'susceptible': (lambda new_infections:
tfd.Deterministic(population_size - new_infections)),
'infected': (lambda new_infections:
tfd.Deterministic(new_infections))})
```
**Note**: the state prior must have the same batch shape as the
passed-in parameters; equivalently, it must sample a full state for each
parameter particle. If any part of the state prior does not depend
on the parameters, you must manually ensure that it has the appropriate
batch shape. For example, in the definition of `new_recoveries` above,
applying `broadcast_to` with the shape of a parameter ensures that
the batch shape is maintained.
Next, we specify a transition model. This takes the state at the
previous day, along with parameters, and returns a distribution
over the state for the current day.
```python
def parameterized_infection_dynamics(_, previous_state, parameters):
new_infections = tfd.Poisson(
parameters['infection_rate'] * previous_state['infected'] *
previous_state['susceptible'] / population_size)
new_recoveries = tfd.Poisson(
previous_state['infected'] * parameters['recovery_rate'])
return tfd.JointDistributionNamed({
'new_infections': new_infections,
'new_recoveries': new_recoveries,
'susceptible': lambda new_infections: tfd.Deterministic(
tf.maximum(0., previous_state['susceptible'] - new_infections)),
'infected': lambda new_infections, new_recoveries: tfd.Deterministic(
tf.maximum(0.,
(previous_state['infected'] +
new_infections - new_recoveries)))})
```
Finally, assume that every day we get to observe noisy counts of new
infections and recoveries.
```python
def parameterized_infection_observations(_, state, parameters):
del parameters # Not used.
return tfd.JointDistributionNamed({
'new_infections': tfd.Poisson(state['new_infections'] + 0.1),
'new_recoveries': tfd.Poisson(state['new_recoveries'] + 0.1)})
```
Combining these components, an `IteratedFilter` augments
the state space to include parameters that may change over time.
```python
iterated_filter = tfp.experimental.sequential.IteratedFilter(
parameter_prior=parameter_prior,
parameterized_initial_state_prior_fn=initial_state_prior_fn,
parameterized_transition_fn=parameterized_infection_dynamics,
parameterized_observation_fn=parameterized_infection_observations)
```
We may then run the filter to estimate parameters from a series
of observations:
```python
# Simulated with `infection_rate=1.2` and `recovery_rate=0.1`.
observed_values = {
'new_infections': tf.convert_to_tensor([
2., 7., 14., 24., 45., 93., 160., 228., 252., 158., 17.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
'new_recoveries': tf.convert_to_tensor([
0., 0., 3., 4., 3., 8., 12., 31., 49., 73., 85., 65., 71.,
58., 42., 65., 36., 31., 32., 27., 31., 20., 19., 19., 14., 27.])
}
parameter_particles = iterated_filter.estimate_parameters(
observations=observed_values,
num_iterations=20,
num_particles=4096,
initial_perturbation_scale=1.0,
cooling_schedule=(
tfp.experimental.sequential.geometric_cooling_schedule(
0.001, k=20)),
seed=test_util.test_seed())
print('Mean of parameter particles from final iteration: {}'.format(
tf.nest.map_structure(lambda x: tf.reduce_mean(x[-1], axis=0),
parameter_particles)))
print('Standard deviation of parameter particles from '
'final iteration: {}'.format(
tf.nest.map_structure(lambda x: tf.math.reduce_std(x[-1], axis=0),
parameter_particles)))
```
    For more control, we could alternatively run filtering iterations on the
    augmented model manually, using the filter of our choice. For example,
    manually invoking `infer_trajectories` (given some choice of
    `perturbation_scale` and initial parameter particles) would allow us to
    inspect the parameter and state values at all timesteps, and their
    corresponding log-probabilities:
```python
trajectories, lps = tfp.experimental.mcmc.infer_trajectories(
      observations=observed_values,
initial_state_prior=iterated_filter.joint_initial_state_prior,
transition_fn=functools.partial(
iterated_filter.joint_transition_fn,
perturbation_scale=perturbation_scale),
observation_fn=iterated_filter.joint_observation_fn,
proposal_fn=iterated_filter.joint_proposal_fn,
initial_state_proposal=iterated_filter.joint_initial_state_proposal(
initial_unconstrained_parameters),
num_particles=4096)
```
#### References:
[1] Edward L. Ionides, Dao Nguyen, Yves Atchade, Stilian Stoev, and Aaron A.
King. Inference for dynamic and latent variable models via iterated,
perturbed Bayes maps. _Proceedings of the National Academy of Sciences_
112, no. 3: 719-724, 2015.
https://www.pnas.org/content/pnas/112/3/719.full.pdf
"""
name = name or 'IteratedFilter'
with tf.name_scope(name):
self._parameter_prior = parameter_prior
self._parameterized_initial_state_prior_fn = (
parameterized_initial_state_prior_fn)
if parameter_constraining_bijector is None:
parameter_constraining_bijector = (
parameter_prior.experimental_default_event_space_bijector())
self._parameter_constraining_bijector = parameter_constraining_bijector
# Augment the prior to include both parameters and states.
self._joint_initial_state_prior = joint_prior_on_parameters_and_state(
parameter_prior,
parameterized_initial_state_prior_fn,
parameter_constraining_bijector,
prior_is_constrained=True)
# Check that prior samples have a consistent number of particles.
# TODO(davmre): remove the need for dummy shape dependencies,
# and this check, by using `JointDistributionNamedAutoBatched` with
# auto-vectorization enabled in `joint_prior_on_parameters_and_state`.
num_particles_canary = 13
canary_seed = samplers.sanitize_seed([0, 1])
def _get_shape_1(x):
if hasattr(x, 'state'):
x = x.state
return tf.TensorShape(x.shape[1:2])
prior_static_sample_shapes = tf.nest.map_structure(
        # Sample with shape [0, num_particles_canary] (zero actual draws),
        # then keep only the particle dimension, trimming the leading 0 and
        # any trailing event dims; shape [num_particles_canary] should remain.
_get_shape_1,
self._joint_initial_state_prior.sample([0, num_particles_canary],
seed=canary_seed))
if not all([
tensorshape_util.is_compatible_with(s[:1], [num_particles_canary])
for s in tf.nest.flatten(prior_static_sample_shapes)
]):
raise ValueError(
'The specified prior does not generate consistent '
'shapes when sampled. Please verify that all parts of '
'`initial_state_prior_fn` have batch shape matching '
'that of the parameters. This may require creating '
'"dummy" dependencies on parameters; for example: '
'`tf.broadcast_to(value, tf.shape(parameter))`. (in a '
f'test sample with {num_particles_canary} particles, we expected '
          'all values to have shape compatible with '
f'[{num_particles_canary}, ...]; '
f'saw shapes {prior_static_sample_shapes})')
# Augment the transition and observation fns to cover both
# parameters and states.
self._joint_transition_fn = augment_transition_fn_with_parameters(
parameter_prior,
parameterized_transition_fn,
parameter_constraining_bijector)
self._joint_observation_fn = augment_observation_fn_with_parameters(
parameterized_observation_fn,
parameter_constraining_bijector)
# If given a proposal for the initial state, augment it into a joint
# proposal over parameters and states.
joint_initial_state_proposal = None
if parameterized_initial_state_proposal_fn:
joint_initial_state_proposal = joint_prior_on_parameters_and_state(
parameter_prior,
parameterized_initial_state_proposal_fn,
parameter_constraining_bijector)
else:
parameterized_initial_state_proposal_fn = (
parameterized_initial_state_prior_fn)
self._joint_initial_state_proposal = joint_initial_state_proposal
self._parameterized_initial_state_proposal_fn = (
parameterized_initial_state_proposal_fn)
# If given a conditional proposal fn (for non-initial states), augment
# it to be joint over states and parameters.
self._joint_proposal_fn = None
if parameterized_proposal_fn:
self._joint_proposal_fn = augment_transition_fn_with_parameters(
parameter_prior,
parameterized_proposal_fn,
parameter_constraining_bijector)
self._batch_ndims = tf.nest.map_structure(
ps.rank_from_shape,
parameter_prior.batch_shape_tensor())
self._name = name
@property
def batch_ndims(self):
return _copy_structure(self._batch_ndims)
@property
def joint_initial_state_prior(self):
"""Initial state prior for the joint (augmented) model."""
return self._joint_initial_state_prior
def joint_initial_state_proposal(self, initial_unconstrained_parameters=None):
"""Proposal to initialize the model with given parameter particles."""
if initial_unconstrained_parameters is None:
joint_initial_state_proposal = self._joint_initial_state_proposal
else:
# Hack: DeterministicEmpirical is a fake distribution whose `sample`
# just proposes *exactly* the parameters we pass in.
unconstrained_parameter_proposal = DeterministicEmpirical(
initial_unconstrained_parameters,
batch_ndims=self.batch_ndims)
# Propose initial state conditioned on the parameters.
joint_initial_state_proposal = joint_prior_on_parameters_and_state(
unconstrained_parameter_proposal,
self.parameterized_initial_state_proposal_fn,
parameter_constraining_bijector=(
self.parameter_constraining_bijector),
prior_is_constrained=False)
# May return `None` if no initial proposal or params were specified.
return joint_initial_state_proposal
@property
def joint_transition_fn(self):
"""Transition function for the joint (augmented) model."""
return self._joint_transition_fn
@property
def joint_observation_fn(self):
"""Observation function for the joint (augmented) model."""
return self._joint_observation_fn
@property
def joint_proposal_fn(self):
"""Proposal function for the joint (augmented) model."""
return self._joint_proposal_fn
@property
def name(self):
return self._name
@property
def parameter_constraining_bijector(self):
"""Bijector mapping unconstrained real values into the parameter space."""
return self._parameter_constraining_bijector
@property
def parameterized_initial_state_prior_fn(self):
"""Prior function that was passed in at construction."""
return self._parameterized_initial_state_prior_fn
@property
def parameterized_initial_state_proposal_fn(self):
"""Initial proposal function passed in at construction."""
return self._parameterized_initial_state_proposal_fn
@property
def parameter_prior(self):
"""Prior distribution on parameters passed in at construction."""
return self._parameter_prior
def one_step(self,
observations,
perturbation_scale,
num_particles,
initial_unconstrained_parameters=None,
seed=None,
name=None,
**kwargs):
"""Runs one step of filtering to sharpen parameter estimates.
Args:
observations: observed `Tensor` value(s) on which to condition the
parameter estimate.
perturbation_scale: scalar float `Tensor`, or any structure of float
`Tensor`s broadcasting to the same shape as the unconstrained
parameters, specifying the scale (standard deviation) of Gaussian
perturbations to each parameter at each timestep.
num_particles: scalar int `Tensor` number of particles to use. Must match
the batch dimension of `initial_unconstrained_parameters`, if specified.
initial_unconstrained_parameters: optional structure of `Tensor`s, of
shape matching
`self.joint_initial_state_prior.sample([
num_particles]).unconstrained_parameters`,
used to initialize the filter.
Default value: `None`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: `str` name for ops constructed by this method.
**kwargs: additional keyword arguments passed to
`tfp.experimental.mcmc.infer_trajectories`.
Returns:
final_unconstrained_parameters: structure of `Tensor`s matching
`initial_unconstrained_parameters`, containing samples of
unconstrained parameters at the final timestep, as computed by
        `tfp.experimental.mcmc.infer_trajectories`.
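    #### Example
    A minimal sketch of a single filtering step (assuming the
    `iterated_filter` and `observed_values` from the class-level example
    above; the perturbation scale here is arbitrary):
    ```python
    unconstrained_parameters = iterated_filter.one_step(
        observations=observed_values,
        perturbation_scale=0.5,
        num_particles=1024)
    # Map the unconstrained samples back into parameter space.
    parameters = iterated_filter.parameter_constraining_bijector.forward(
        unconstrained_parameters)
    ```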
"""
with self._name_scope(name or 'one_step'):
# Run the particle filter.
(unconstrained_parameter_trajectories, _), _ = (
infer_trajectories(
observations=observations,
initial_state_prior=self.joint_initial_state_prior,
transition_fn=functools.partial(
self.joint_transition_fn,
perturbation_scale=perturbation_scale),
observation_fn=self.joint_observation_fn,
proposal_fn=self.joint_proposal_fn,
initial_state_proposal=self.joint_initial_state_proposal(
initial_unconstrained_parameters),
num_particles=num_particles,
seed=seed,
**kwargs))
# Return the parameter estimates from the final step of the trajectory.
return tf.nest.map_structure(
lambda part: part[-1],
unconstrained_parameter_trajectories)
def estimate_parameters(self,
observations,
num_iterations,
num_particles,
initial_perturbation_scale,
cooling_schedule,
seed=None,
name=None,
**kwargs):
"""Runs multiple iterations of filtering following a cooling schedule.
Args:
observations: observed `Tensor` value(s) on which to condition the
parameter estimate.
num_iterations: int `Tensor` number of filtering iterations to run.
num_particles: scalar int `Tensor` number of particles to use.
initial_perturbation_scale: scalar float `Tensor`, or any structure of
float `Tensor`s broadcasting to the same shape as the (unconstrained)
parameters, specifying the scale (standard deviation) of Gaussian
perturbations to each parameter at the first timestep.
cooling_schedule: callable with signature
`cooling_factor = cooling_schedule(iteration)` for `iteration` in
`[0, ..., num_iterations - 1]`. The filter is
invoked with perturbations of scale
`initial_perturbation_scale * cooling_schedule(iteration)`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: `str` name for ops constructed by this method.
**kwargs: additional keyword arguments passed to
`tfp.experimental.mcmc.infer_trajectories`.
Returns:
final_parameter_particles: structure of `Tensor`s matching
`self.parameter_prior`, each with batch shape
`[num_iterations, num_particles]`. These are the populations
of particles representing the parameter estimate after each iteration
of filtering.
"""
with self._name_scope(name or 'estimate_parameters'):
step_seed, initial_seed = samplers.split_seed(seed)
initial_perturbation_scale = tf.convert_to_tensor(
initial_perturbation_scale, name='initial_perturbation_scale')
# Get initial parameter particles from the first filtering iteration.
initial_unconstrained_parameters = self.one_step(
observations=observations,
num_particles=num_particles,
perturbation_scale=initial_perturbation_scale,
seed=step_seed,
**kwargs)
# Run the remaining iterations and accumulate the results.
@tf.function(autograph=False)
def loop_body(unconstrained_parameters_seed, cooling_fraction):
unconstrained_parameters, seed = unconstrained_parameters_seed
step_seed, seed = samplers.split_seed(seed)
return (self.one_step(
observations=observations,
num_particles=num_particles,
perturbation_scale=tf.nest.map_structure(
lambda s: cooling_fraction * s, initial_perturbation_scale),
initial_unconstrained_parameters=unconstrained_parameters,
seed=step_seed,
**kwargs), seed)
estimated_unconstrained_parameters, _ = tf.scan(
fn=loop_body,
elems=cooling_schedule(ps.range(1, num_iterations)),
initializer=(initial_unconstrained_parameters, initial_seed))
return self.parameter_constraining_bijector.forward(
estimated_unconstrained_parameters)
@contextlib.contextmanager
def _name_scope(self, name):
with tf.name_scope(self.name):
with tf.name_scope(name) as name_scope:
yield name_scope
|
|
#! /usr/bin/env python
import os
import sys
sadm_path = os.path.join( os.environ['HOME'], 'sadm' )
sys.path.append( sadm_path )
sys.path.append( os.path.join( sadm_path, 'lib' ) )
sys.path.append( os.path.join( sadm_path, 'buildscripts' ) )
import ircbot
import irclib
import command
import sadm_config
import buildinfo
SADM_CHANNEL = '#sadm'
AUTHORIZED_NICKS = [ 'psbot' ]
IRC_SERVER = 'bazaar.example.com' ## TODO make me part of a conf...
IRC_PORT = 6667
IRC_PASSWORD = 'password' # TODO make me part of a conf...
DEFAULT_NICK = 'unnamed-box'
class SadmBot(ircbot.SingleServerIRCBot):
def __init__(self, channels, nickname, server, port=6667, password=None, ssl=False):
if password is None:
ircbot.SingleServerIRCBot.__init__(self,
[(server,port)],
"[%s]" % nickname,
"[%s]" % nickname, ssl=ssl)
else:
ircbot.SingleServerIRCBot.__init__(self,
[(server, port, password)],
"[%s]" % nickname,
"[%s]" % nickname, ssl=ssl)
self.join_channels = channels
self.nickname = nickname
self.authorized_nicks = set()
self.cmds_run = 0
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
for channel in self.join_channels:
c.join(channel)
c.privmsg( SADM_CHANNEL, "#%s is ready" % (self.nickname) )
self.refresh_users()
def on_privmsg(self, c, e):
self.do_cmd(e, e.arguments()[0], self.nickname)
def on_pubmsg(self, c, e):
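        # A channel message can address this bot in several ways:
        #   "<nick>: <cmd>" - directly by this bot's nick,
        #   "all: <cmd>"    - broadcast to every listening bot,
        #   "~<cmd>"        - shorthand inside this bot's own "#<nick>" channel.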
src_nick = e.source().split('!')[0]
if self.cmds_run % 10 == 0:
self.refresh_users()
a = e.arguments()[0].split(":", 1)
if len(a) > 1 and irclib.irc_lower(a[0]) == irclib.irc_lower(self.connection.get_nickname()):
cmd = a[1].strip()
            addressed_to = self.nickname
        elif e.arguments()[0].startswith( "%s:" % self.nickname ):
            cmd = a[-1].strip()
            addressed_to = self.nickname
        elif e.arguments()[0].startswith( "all:" ):
            cmd = a[-1].strip()
            addressed_to = 'all'
        elif e.target().lower() == "#%s" % self.nickname.lower() and e.arguments()[0].startswith('~'):
            cmd = e.arguments()[0][1:].strip()
            addressed_to = self.nickname
else:
cmd = None
addressed_to = None
if cmd is not None and src_nick in self.authorized_nicks:
self.cmds_run += 1
self.do_cmd(e, cmd, addressed_to=addressed_to )
elif cmd is not None:
#c.notice( self.authorized_nicks[0], "%s is bothering me!" % src_nick )
c.privmsg( SADM_CHANNEL, "~lart %s for bothering me" % src_nick )
print( "%s is bothering me!" % src_nick )
def on_dccmsg(self, c, e):
c.privmsg("You said: " + e.arguments()[0])
def on_dccchat(self, c, e):
if len(e.arguments()) != 2:
return
args = e.arguments()[1].split()
if len(args) == 4:
try:
address = irclib.ip_numstr_to_quad(args[2])
port = int(args[3])
except ValueError:
return
self.dcc_connect(address, port)
def run_cmd( self, nick, cmd ):
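        # Run the command, relay each line of its output to this bot's
        # private channel as it arrives, then report the exit status.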
print( "%s: running %s" % ( nick, cmd ) )
#self.connection.privmsg( SADM_CHANNEL, "%s: Starting cmd %s" % (nick, cmd.split()[0]) )
cmd_obj = command.Command( cmd )
#self.connection.privmsg( SADM_CHANNEL, "%s: %s returned." % ( nick, cmd.split()[0]) )
output = cmd_obj.get_output()
while output is not None:
print( "Recieved: %s" % output )
self.connection.privmsg( '#%s' % self.nickname, "%s: %s" % ( nick, output ) )
output = cmd_obj.get_output()
result = cmd_obj.wait()
self.connection.privmsg( '#%s' % self.nickname, "%s: %s returned: %s" % ( nick, cmd.split()[0], result ) )
return (result, nick, cmd)
def get_sandboxes( self ):
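        # Sandbox directories appear to follow a dotted
        # "<component>.<branch>.<type>" naming scheme (at least two dots);
        # the "exists"/"list" handlers below index into that name.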
sbs=[]
sbs_folder = sadm_config.Config().sandbox_container_folder
for i in os.listdir( sbs_folder ):
if os.path.isdir(os.path.join( sbs_folder, i )) and i.count('.') >= 2:
sbs.append( i )
return sbs
def do_cmd(self, e, cmd, addressed_to):
nick = irclib.nm_to_n(e.source())
c = self.connection
cmd_segs = cmd.split(' ')
c.privmsg( SADM_CHANNEL, "%s: %s output in #%s" % ( nick, cmd, self.nickname ) )
c.privmsg( '#%s' % self.nickname, "%s: Running %s" % ( nick, cmd ) )
print( "log: %s %s %s %s" % ( repr(e.arguments()), repr(e.eventtype()), repr(e.source()), repr(e.target()) ) )
if cmd == "disconnect":
### Kick everyone out of my private channel ###
for i in self.channels['#%s' % self.nickname].users():
c.kick( '#%s' % self.nickname, i, "Goodbye, the server is leaving the channel." )
self.disconnect()
elif cmd == "die":
### Kick everyone out of my private channel ###
for i in self.channels['#%s' % self.nickname].users():
c.kick( '#%s' % self.nickname, i, "Goodbye, the server is leaving the channel." )
self.die()
elif cmd_segs[0] == "sadm":
if len(cmd_segs) < 2 or cmd_segs[1] in ['config', 'foreach', 'loop']:
c.privmsg( "#%s" % self.nickname, "%s: I don't want to do that." % nick )
c.kick( e.target(), nick, "Go away." )
return
self.run_cmd( nick, cmd + ' --no-color' )
elif cmd_segs[0] == "bzr":
c.privmsg( "#%s" % self.nickname, "%s: bzr cmds are not implemented." % nick )
elif cmd_segs[0] == "sb":
c.privmsg( "#%s" % self.nickname, "%s: sb cmds are not implemented." % nick )
elif cmd_segs[0] == "sysinfo":
c.privmsg( "#%s" % self.nickname, "%s: sysinfo cmds are not implemented." % nick )
elif cmd_segs[0].lower() == "exists":
print( "%s: %s" % ( nick, cmd ) )
if len( cmd_segs ) < 3:
c.notice( nick, "Usage: <machine>: exists [branch|component|type|sandbox] <name>" )
print( "cmd_segs: %s" % repr( cmd_segs ) )
elif cmd_segs[1] == "branch":
branches = set([ x.split('.')[1] for x in self.get_sandboxes() ])
if cmd_segs[-1] in branches:
c.privmsg( "#%s" % self.nickname, "%s: %s has %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif addressed_to != "all":
c.privmsg( "#%s" % self.nickname, "%s: %s does not have %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif cmd_segs[1] == "component":
branches = set([ x.split('.')[0] for x in self.get_sandboxes() ])
if cmd_segs[-1] in branches:
c.privmsg( "#%s" % self.nickname, "%s: %s has %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif addressed_to != "all":
c.privmsg( "#%s" % self.nickname, "%s: %s does not have %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif cmd_segs[1] == "type":
branches = set([ x.split('.')[2] for x in self.get_sandboxes() ])
if cmd_segs[-1] in branches:
c.privmsg( "#%s" % self.nickname, "%s: %s has %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif addressed_to != "all":
c.privmsg( "#%s" % self.nickname, "%s: %s does not have %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif cmd_segs[1] == "sandbox":
sbs = self.get_sandboxes()
if cmd_segs[-1] in sbs:
c.privmsg( "#%s" % self.nickname, "%s: %s has %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif addressed_to != "all":
c.privmsg( "#%s" % self.nickname, "%s: %s does not have %s" % ( nick, self.nickname, cmd_segs[-1] ) )
elif cmd_segs[0].lower() == "remove":
c.privmsg( "#%s" % self.nickname, "%s: remove is not implemented." )
elif cmd_segs[0].lower() == "list":
print( "%s: %s" % ( nick, cmd ) )
if len( cmd_segs ) < 2:
c.notice( nick, "Usage: <machine>: list [branches|components|types|sandboxes]" )
elif cmd_segs[1] == "branches":
branches = set([ x.split('.')[1] for x in self.get_sandboxes() ])
for branch in branches:
c.privmsg( '#%s' % self.nickname, "%s: %s" % ( nick, branch ) )
if len(branches) == 0:
c.privmsg( '#%s' % self.nickname, "%s: None" % (nick) )
elif cmd_segs[1] == "components":
branches = set([ x.split('.')[0] for x in self.get_sandboxes() ])
for branch in branches:
c.privmsg( '#%s' % self.nickname, "%s: %s" % ( nick, branch ) )
if len(branches) == 0:
c.privmsg( '#%s' % self.nickname, "%s: None" % (nick) )
elif cmd_segs[1] == "types":
branches = set([ x.split('.')[2] for x in self.get_sandboxes() ])
for branch in branches:
c.privmsg( '#%s' % self.nickname, "%s: %s" % ( nick, branch ) )
if len(branches) == 0:
c.privmsg( '#%s' % self.nickname, "%s: None" % (nick) )
elif cmd_segs[1] == "sandboxes":
sbs = self.get_sandboxes()
for s in sbs:
c.privmsg( '#%s' % self.nickname, "%s: %s" % ( nick, s ) )
if len(sbs) == 0:
c.privmsg( '#%s' % self.nickname, "%s: None" % (nick) )
elif cmd == "stats":
c.privmsg(e.target(), "%s: Channels: %s" % (nick, repr(self.join_channels)))
c.privmsg(e.target(), "%s: Authorized: %s" % (nick, repr(self.authorized_nicks)))
c.privmsg(e.target(), "%s: Commands Run: %s" % (nick, repr(self.cmds_run)))
#elif cmd == "dcc":
# dcc = self.dcc_listen()
# c.ctcp("DCC", nick, "CHAT chat %s %d" % (
# irclib.ip_quad_to_numstr(dcc.localaddress),
# dcc.localport))
elif cmd_segs[0].lower() == "authorize" and cmd_segs[-1] != "authorize":
self.authorized_nicks.add( cmd_segs[-1] )
elif cmd_segs[0].lower() == "unauthorize" and cmd_segs[-1] != "unauthorize":
if cmd_segs[-1] in self.authorized_nicks and cmd_segs[-1] not in AUTHORIZED_NICKS:
self.authorized_nicks.remove( cmd_segs[-1] )
elif cmd_segs[0].lower() == "users":
return_users = [ x for x in self.authorized_nicks if x in self.channels[SADM_CHANNEL].users() ]
c.privmsg( e.target(), "I will only listen to %s" % repr( return_users ) )
elif cmd_segs[0].lower() == "refreshusers":
self.refresh_users()
elif cmd_segs[0].lower() == "help":
c.privmsg( e.target(), "%s: list, exists, sadm, refreshusers, bzr, sb, ..." % nick )
else:
c.privmsg( e.target(), "%s: what is %s?" % (nick, cmd) )
def refresh_users(self):
if not "psbot" in self.authorized_nicks:
self.authorized_nicks.add( 'psbot' )
if not SADM_CHANNEL in self.channels:
return
for i in self.channels[SADM_CHANNEL].users():
if self.channels[SADM_CHANNEL].is_voiced(i) or self.channels[SADM_CHANNEL].is_oper(i) and i not in self.authorized_nicks:
self.authorized_nicks.add(i)
for i in self.authorized_nicks:
print( "%s (voice: %s, op: %s)" % ( i, self.channels[SADM_CHANNEL].is_voiced(i), self.channels[SADM_CHANNEL].is_oper(i) ) )
def main():
#server = '10.10.10.100'
#port = 6667
server = IRC_SERVER
port = IRC_PORT
build_info = buildinfo.BuildInfo()
print( "Build Info: %s" % repr( (build_info.os, build_info.host, build_info.version, build_info.bitness, build_info.stamp) ) )
    if len(sys.argv) > 1:
        nickname = sys.argv[-1]
    else:
        nickname = build_info.host
if nickname == "localhost":
nickname = DEFAULT_NICK
channels = [SADM_CHANNEL, '#%s' % nickname ]
password = IRC_PASSWORD
print( "Starting %s on %s:%s %s" % ( nickname, server, port, channels ) )
bot = SadmBot(channels, nickname, server, port, password, ssl=True)
bot.start()
if __name__ == "__main__":
main()
|
|
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging as messaging
from oslo_config import cfg
from mock import call
from mock import patch
from designate import exceptions
from designate import objects
from designate.backend import impl_fake
from designate.central import rpcapi as central_rpcapi
from designate.mdns import rpcapi as mdns_rpcapi
from designate.tests.test_pool_manager import PoolManagerTestCase
class PoolManagerServiceNoopTest(PoolManagerTestCase):
def setUp(self):
super(PoolManagerServiceNoopTest, self).setUp()
self.config(
threshold_percentage=100,
enable_recovery_timer=False,
enable_sync_timer=False,
cache_driver='noop',
group='service:pool_manager')
# TODO(kiall): Rework all this pool config etc into a fixture..
# Configure the Pool ID
self.config(
pool_id='794ccc2c-d751-44fe-b57f-8894c9f5c842',
group='service:pool_manager')
# Configure the Pool
section_name = 'pool:794ccc2c-d751-44fe-b57f-8894c9f5c842'
section_opts = [
cfg.ListOpt('targets', default=[
'f278782a-07dc-4502-9177-b5d85c5f7c7e',
'a38703f2-b71e-4e5b-ab22-30caaed61dfd',
]),
cfg.ListOpt('nameservers', default=[
'c5d64303-4cba-425a-9f3c-5d708584dde4',
'c67cdc95-9a9e-4d2a-98ed-dc78cbd85234',
]),
cfg.ListOpt('also_notifies', default=[]),
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
# Configure the Pool Targets
section_name = 'pool_target:f278782a-07dc-4502-9177-b5d85c5f7c7e'
section_opts = [
cfg.StrOpt('type', default='fake'),
cfg.ListOpt('masters', default=['127.0.0.1:5354']),
cfg.DictOpt('options', default={})
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
section_name = 'pool_target:a38703f2-b71e-4e5b-ab22-30caaed61dfd'
section_opts = [
cfg.StrOpt('type', default='fake'),
cfg.ListOpt('masters', default=['127.0.0.1:5354']),
cfg.DictOpt('options', default={})
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
# Configure the Pool Nameservers
section_name = 'pool_nameserver:c5d64303-4cba-425a-9f3c-5d708584dde4'
section_opts = [
cfg.StrOpt('host', default='127.0.0.1'),
            cfg.IntOpt('port', default=5355),
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
section_name = 'pool_nameserver:c67cdc95-9a9e-4d2a-98ed-dc78cbd85234'
section_opts = [
cfg.StrOpt('host', default='127.0.0.1'),
            cfg.IntOpt('port', default=5356),
]
cfg.CONF.register_group(cfg.OptGroup(name=section_name))
cfg.CONF.register_opts(section_opts, group=section_name)
# Start the Service
self.service = self.start_service('pool_manager')
self.cache = self.service.cache
@staticmethod
def _build_domain(name, action, status):
values = {
'id': '75ea1626-eea7-46b5-acb7-41e5897c2d40',
'name': name,
'pool_id': '794ccc2c-d751-44fe-b57f-8894c9f5c842',
'action': action,
'serial': 1422062497,
'status': status
}
return objects.Domain.from_dict(values)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_domain(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, _):
domain = self._build_domain('example.org.', 'CREATE', 'PENDING')
self.service.create_domain(self.admin_context, domain)
create_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'CREATE')
# Even though _retrieve_statuses tries to get from mdns, mdns does
# not return any status
self.assertEqual(0, len(create_statuses))
# Ensure poll_for_serial_number was called for each nameserver.
self.assertEqual(2, mock_poll_for_serial_number.call_count)
self.assertEqual(
[call(self.admin_context, domain,
self.service.pool.nameservers[0], 30, 15, 10, 5),
call(self.admin_context, domain,
self.service.pool.nameservers[1], 30, 15, 10, 5)],
mock_poll_for_serial_number.call_args_list)
        # With a noop cache, the pool manager must call into mdns to compute
        # consensus; since the mocked mdns call fails, no statuses are
        # available and update_status is never called.
self.assertEqual(False, mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(impl_fake.FakeBackend, 'create_domain')
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_domain_target_both_failure(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, mock_create_domain, _):
domain = self._build_domain('example.org.', 'CREATE', 'PENDING')
mock_create_domain.side_effect = exceptions.Backend
self.service.create_domain(self.admin_context, domain)
create_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'CREATE')
self.assertEqual(0, len(create_statuses))
# Ensure notify_zone_changed and poll_for_serial_number
# were never called.
self.assertEqual(False, mock_notify_zone_changed.called)
self.assertEqual(False, mock_poll_for_serial_number.called)
        # Since consensus is not reached this early, we immediately call
# central's update_status.
self.assertEqual(True, mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(impl_fake.FakeBackend, 'create_domain')
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_domain_target_one_failure(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, mock_create_domain, _):
domain = self._build_domain('example.org.', 'CREATE', 'PENDING')
mock_create_domain.side_effect = [None, exceptions.Backend]
self.service.create_domain(self.admin_context, domain)
create_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'CREATE')
self.assertEqual(0, len(create_statuses))
        # Since consensus is not reached this early, we immediately call
# central's update_status.
self.assertEqual(True, mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(impl_fake.FakeBackend, 'create_domain')
@patch.object(mdns_rpcapi.MdnsAPI, 'poll_for_serial_number')
@patch.object(mdns_rpcapi.MdnsAPI, 'notify_zone_changed')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_create_domain_target_one_failure_consensus(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, mock_create_domain, _):
self.service.stop()
self.config(
threshold_percentage=50,
group='service:pool_manager')
self.service = self.start_service('pool_manager')
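        # With threshold_percentage=50, one successful target out of two is
        # enough for consensus, so polling proceeds and no error status is
        # reported to central.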
domain = self._build_domain('example.org.', 'CREATE', 'PENDING')
mock_create_domain.side_effect = [None, exceptions.Backend]
self.service.create_domain(self.admin_context, domain)
create_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'CREATE')
self.assertEqual(0, len(create_statuses))
# Ensure poll_for_serial_number was called for each nameserver.
self.assertEqual(
[call(self.admin_context, domain,
self.service.pool.nameservers[0], 30, 15, 10, 5),
call(self.admin_context, domain,
self.service.pool.nameservers[1], 30, 15, 10, 5)],
mock_poll_for_serial_number.call_args_list)
self.assertEqual(False, mock_update_status.called)
@patch.object(impl_fake.FakeBackend, 'delete_domain',
side_effect=exceptions.Backend)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_delete_domain(self, mock_update_status, _):
domain = self._build_domain('example.org.', 'DELETE', 'PENDING')
self.service.delete_domain(self.admin_context, domain)
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', domain.serial)
@patch.object(impl_fake.FakeBackend, 'delete_domain')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_delete_domain_target_both_failure(
self, mock_update_status, mock_delete_domain):
domain = self._build_domain('example.org.', 'DELETE', 'PENDING')
mock_delete_domain.side_effect = exceptions.Backend
self.service.delete_domain(self.admin_context, domain)
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', domain.serial)
@patch.object(impl_fake.FakeBackend, 'delete_domain')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_delete_domain_target_one_failure(
self, mock_update_status, mock_delete_domain):
domain = self._build_domain('example.org.', 'DELETE', 'PENDING')
mock_delete_domain.side_effect = [None, exceptions.Backend]
self.service.delete_domain(self.admin_context, domain)
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', domain.serial)
@patch.object(impl_fake.FakeBackend, 'delete_domain')
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_delete_domain_target_one_failure_consensus(
self, mock_update_status, mock_delete_domain):
self.service.stop()
self.config(
threshold_percentage=50,
group='service:pool_manager')
self.service = self.start_service('pool_manager')
domain = self._build_domain('example.org.', 'DELETE', 'PENDING')
mock_delete_domain.side_effect = [None, exceptions.Backend]
self.service.delete_domain(self.admin_context, domain)
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', domain.serial)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status(self, mock_update_status, _):
domain = self._build_domain('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[0],
'SUCCESS', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertEqual(False, mock_update_status.called)
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[1],
'SUCCESS', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertEqual(False, mock_update_status.called)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status_both_failure(self, mock_update_status, _):
domain = self._build_domain('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[0],
'ERROR', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', 0)
# Reset the mock call attributes.
mock_update_status.reset_mock()
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[1],
'ERROR', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', 0)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status_one_failure(self, mock_update_status, _):
domain = self._build_domain('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[0],
'SUCCESS', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertEqual(False, mock_update_status.called)
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[1],
'ERROR', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', 0)
@patch.object(mdns_rpcapi.MdnsAPI, 'get_serial_number',
side_effect=messaging.MessagingException)
@patch.object(central_rpcapi.CentralAPI, 'update_status')
def test_update_status_one_failure_consensus(self, mock_update_status, _):
self.service.stop()
self.config(
threshold_percentage=50,
group='service:pool_manager')
self.service = self.start_service('pool_manager')
domain = self._build_domain('example.org.', 'UPDATE', 'PENDING')
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[0],
'SUCCESS', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
# Ensure update_status was not called.
self.assertEqual(False, mock_update_status.called)
# Reset the mock call attributes.
mock_update_status.reset_mock()
self.service.update_status(self.admin_context, domain,
self.service.pool.nameservers[1],
'ERROR', domain.serial)
update_statuses = self.service._retrieve_statuses(
self.admin_context, domain, 'UPDATE')
self.assertEqual(0, len(update_statuses))
mock_update_status.assert_called_once_with(
self.admin_context, domain.id, 'ERROR', 0)
|
|
"""HTML5 Push Messaging notification service."""
from datetime import datetime, timedelta
from functools import partial
import json
import logging
import time
from urllib.parse import urlparse
import uuid
from aiohttp.hdrs import AUTHORIZATION
import jwt
from py_vapid import Vapid
from pywebpush import WebPusher
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import (
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_UNAUTHORIZED,
URL_ROOT,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
from homeassistant.util.json import load_json, save_json
from .const import DOMAIN, SERVICE_DISMISS
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = "html5_push_registrations.conf"
ATTR_GCM_SENDER_ID = "gcm_sender_id"
ATTR_GCM_API_KEY = "gcm_api_key"
ATTR_VAPID_PUB_KEY = "vapid_pub_key"
ATTR_VAPID_PRV_KEY = "vapid_prv_key"
ATTR_VAPID_EMAIL = "vapid_email"
def gcm_api_deprecated(value):
"""Warn user that GCM API config is deprecated."""
if value:
_LOGGER.warning(
"Configuring html5_push_notifications via the GCM api"
" has been deprecated and will stop working after April 11,"
" 2019. Use the VAPID configuration instead. For instructions,"
" see https://www.home-assistant.io/integrations/html5/"
)
return value
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(ATTR_GCM_SENDER_ID): vol.All(cv.string, gcm_api_deprecated),
vol.Optional(ATTR_GCM_API_KEY): cv.string,
vol.Optional(ATTR_VAPID_PUB_KEY): cv.string,
vol.Optional(ATTR_VAPID_PRV_KEY): cv.string,
vol.Optional(ATTR_VAPID_EMAIL): cv.string,
}
)
ATTR_SUBSCRIPTION = "subscription"
ATTR_BROWSER = "browser"
ATTR_NAME = "name"
ATTR_ENDPOINT = "endpoint"
ATTR_KEYS = "keys"
ATTR_AUTH = "auth"
ATTR_P256DH = "p256dh"
ATTR_EXPIRATIONTIME = "expirationTime"
ATTR_TAG = "tag"
ATTR_ACTION = "action"
ATTR_ACTIONS = "actions"
ATTR_TYPE = "type"
ATTR_URL = "url"
ATTR_DISMISS = "dismiss"
ATTR_PRIORITY = "priority"
DEFAULT_PRIORITY = "normal"
ATTR_TTL = "ttl"
DEFAULT_TTL = 86400
ATTR_JWT = "jwt"
WS_TYPE_APPKEY = "notify/html5/appkey"
SCHEMA_WS_APPKEY = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_APPKEY}
)
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict,
vol.Schema(
{vol.Required(ATTR_AUTH): cv.string, vol.Required(ATTR_P256DH): cv.string}
),
)
SUBSCRIPTION_SCHEMA = vol.All(
dict,
vol.Schema(
{
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
}
),
)
DISMISS_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
}
)
REGISTER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(["chrome", "firefox"]),
vol.Optional(ATTR_NAME): cv.string,
}
)
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(["received", "clicked", "closed"]),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
}
)
NOTIFY_CALLBACK_EVENT = "html5_notification"
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
"actions",
"badge",
"body",
"dir",
"icon",
"image",
"lang",
"renotify",
"requireInteraction",
"tag",
"timestamp",
"vibrate",
)
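# Keys listed above are lifted out of a service call's `data` dict and placed
# at the top level of the push payload (they correspond to showNotification
# options); any other keys stay under the payload's "data" entry. See
# `HTML5NotificationService.send_message` below.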
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
vapid_pub_key = config.get(ATTR_VAPID_PUB_KEY)
vapid_prv_key = config.get(ATTR_VAPID_PRV_KEY)
vapid_email = config.get(ATTR_VAPID_EMAIL)
def websocket_appkey(hass, connection, msg):
connection.send_message(websocket_api.result_message(msg["id"], vapid_pub_key))
hass.components.websocket_api.async_register_command(
WS_TYPE_APPKEY, websocket_appkey, SCHEMA_WS_APPKEY
)
hass.http.register_view(HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(
hass, gcm_api_key, vapid_prv_key, vapid_email, registrations, json_path
)
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = "/api/notify.html5"
name = "api:notify.html5"
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app["hass"]
await hass.async_add_job(save_json, self.json_path, self.registrations)
return self.json_message("Push notification subscriber registered.")
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
"Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or "unnamed device", self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message("Registration not found.")
reg = self.registrations.pop(found)
try:
hass = request.app["hass"]
await hass.async_add_job(save_json, self.json_path, self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
"Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
)
return self.json_message("Push notification subscriber unregistered.")
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = "/api/notify.html5/callback"
name = "api:notify.html5/callback"
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
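        # For example, a (hypothetical) token with unverified claims
        # {'exp': ..., 'target': 'my phone'} is verified against the 'auth'
        # key of the 'my phone' registration's subscription.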
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message(
"No target found in JWT", status_code=HTTP_UNAUTHORIZED
)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message(
"Authorization header is expected", status_code=HTTP_UNAUTHORIZED
)
parts = auth.split()
if parts[0].lower() != "bearer":
return self.json_message(
"Authorization header must " "start with Bearer",
status_code=HTTP_UNAUTHORIZED,
)
if len(parts) != 2:
return self.json_message(
"Authorization header must " "be Bearer token",
status_code=HTTP_UNAUTHORIZED,
)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message("token is invalid", status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning(
"Callback event payload is not valid: %s",
humanize_error(event_payload, ex),
)
event_name = "{}.{}".format(NOTIFY_CALLBACK_EVENT, event_payload[ATTR_TYPE])
request.app["hass"].bus.fire(event_name, event_payload)
return self.json({"status": "ok", "event": event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, hass, gcm_key, vapid_prv, vapid_email, registrations, json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self._vapid_prv = vapid_prv
self._vapid_email = vapid_email
self.registrations = registrations
self.registrations_json_path = json_path
async def async_dismiss_message(service):
"""Handle dismissing notification message service calls."""
kwargs = {}
if self.targets is not None:
kwargs[ATTR_TARGET] = self.targets
elif service.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
await self.async_dismiss(**kwargs)
hass.services.async_register(
DOMAIN,
SERVICE_DISMISS,
async_dismiss_message,
schema=DISMISS_SERVICE_SCHEMA,
)
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def dismiss(self, **kwargs):
"""Dismisses a notification."""
data = kwargs.get(ATTR_DATA)
tag = data.get(ATTR_TAG) if data else ""
payload = {ATTR_TAG: tag, ATTR_DISMISS: True, ATTR_DATA: {}}
self._push_message(payload, **kwargs)
async def async_dismiss(self, **kwargs):
"""Dismisses a notification.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.dismiss, **kwargs))
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
tag = str(uuid.uuid4())
payload = {
"badge": "/static/images/notification-badge.png",
"body": message,
ATTR_DATA: {},
"icon": "/static/icons/favicon-192x192.png",
ATTR_TAG: tag,
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (
payload[ATTR_DATA].get(ATTR_URL) is None
and payload.get(ATTR_ACTIONS) is None
):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
self._push_message(payload, **kwargs)
def _push_message(self, payload, **kwargs):
"""Send the message."""
timestamp = int(time.time())
ttl = int(kwargs.get(ATTR_TTL, DEFAULT_TTL))
priority = kwargs.get(ATTR_PRIORITY, DEFAULT_PRIORITY)
if priority not in ["normal", "high"]:
priority = DEFAULT_PRIORITY
payload["timestamp"] = timestamp * 1000 # Javascript ms since epoch
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
try:
info = REGISTER_SCHEMA(info)
except vol.Invalid:
_LOGGER.error(
"%s is not a valid HTML5 push notification" " target", target
)
continue
payload[ATTR_DATA][ATTR_JWT] = add_jwt(
timestamp,
target,
payload[ATTR_TAG],
info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH],
)
webpusher = WebPusher(info[ATTR_SUBSCRIPTION])
if self._vapid_prv and self._vapid_email:
vapid_headers = create_vapid_headers(
self._vapid_email, info[ATTR_SUBSCRIPTION], self._vapid_prv
)
vapid_headers.update({"urgency": priority, "priority": priority})
response = webpusher.send(
data=json.dumps(payload), headers=vapid_headers, ttl=ttl
)
else:
# Only pass the gcm key if we're actually using GCM
# If we don't, notifications break on Firefox
gcm_key = (
self._gcm_key
if "googleapis.com" in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT]
else None
)
response = webpusher.send(json.dumps(payload), gcm_key=gcm_key, ttl=ttl)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path, self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
def add_jwt(timestamp, target, tag, jwt_secret):
"""Create JWT json to put into payload."""
jwt_exp = datetime.fromtimestamp(timestamp) + timedelta(days=JWT_VALID_DAYS)
jwt_claims = {
"exp": jwt_exp,
"nbf": timestamp,
"iat": timestamp,
ATTR_TARGET: target,
ATTR_TAG: tag,
}
return jwt.encode(jwt_claims, jwt_secret).decode("utf-8")
def create_vapid_headers(vapid_email, subscription_info, vapid_private_key):
"""Create encrypted headers to send to WebPusher."""
if vapid_email and vapid_private_key and ATTR_ENDPOINT in subscription_info:
url = urlparse(subscription_info.get(ATTR_ENDPOINT))
vapid_claims = {
"sub": f"mailto:{vapid_email}",
"aud": f"{url.scheme}://{url.netloc}",
}
vapid = Vapid.from_string(private_key=vapid_private_key)
return vapid.sign(vapid_claims)
return None
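# Illustrative usage sketch (not part of the original component). The target
# name, tag and secret below are hypothetical placeholder values; in practice
# they come from a stored registration, as in _push_message() above.
def _example_add_jwt_usage():
    """Build a signed callback JWT for one hypothetical registration."""
    import time
    now = int(time.time())
    return add_jwt(now, "my_phone", "example-tag", "example-auth-secret")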
|
|
import logging
import numpy as np # type: ignore
from numpy import insert, dot # type: ignore
from numpy.linalg import inv # type: ignore
from typing import List, Tuple, Union
from opentrons.calibration_storage.types import AttitudeMatrix
from opentrons.config import feature_flags as ff
mod_log = logging.getLogger(__name__)
# TODO(lc, 8/11/2020): temporary type until
# old calibration data is removed.
AxisPosition = Union[
Tuple[float, float, float], Tuple[float, float]]
SolvePoints = Tuple[
Tuple[float, float, float],
Tuple[float, float, float],
Tuple[float, float, float]]
def identity_deck_transform() -> np.ndarray:
""" The default deck transform """
if ff.enable_calibration_overhaul():
return np.identity(3)
else:
return np.identity(4)
def solve_attitude(
expected: SolvePoints,
actual: SolvePoints
) -> AttitudeMatrix:
ex = np.array([
list(point)
for point in expected
]).transpose()
ac = np.array([
list(point)
for point in actual
]).transpose()
t = np.dot(ac, inv(ex))
mask_transform = np.array([
[True, True, False],
[True, True, False],
[False, False, False]])
masked_array = np.ma.masked_array(t, ~mask_transform)
no_z_component = np.zeros((3, 3))
np.put(no_z_component, [8, 8], 1)
transform = masked_array.filled(0) + no_z_component
return transform.round(4).tolist()
def solve(expected: List[Tuple[float, float]],
actual: List[Tuple[float, float]]) -> np.ndarray:
"""
Takes two lists of 3 x-y points each, and calculates the matrix
representing the transformation from one space to the other.
The 3x3 matrix returned by this method represents the 2-D transformation
matrix from the actual point to the expected point.
Example:
If the expected points are:
[ (1, 1),
(2, 2),
(1, 2) ]
And the actual measured points are:
[ (1.1, 1.1),
(2.1, 2.1),
(1.1, 2.1) ]
(in other words, a shift of exactly +0.1 in both x and y)
Then the resulting transformation matrix T should be:
[ 1 0 -0.1 ]
[ 0 1 -0.1 ]
[ 0 0 1 ]
Then, if we take a 3x3 matrix B representing one of the measured points
on the deck:
[ 1 0 1.1 ]
[ 0 1 2.1 ]
[ 0 0 1 ]
Then B*T will yield the "expected" point:
[ 1 0 1 ]
[ 0 1 2 ]
[ 0 0 1 ]
The return value of this function is the transformation matrix T
"""
# Note: input list shape validation is handled by the type checker
# Turn expected and actual matrices into numpy ndarrays with the last row
# of [1 1 1] appended, and then take the dot product of the resulting
# actual matrix with the inverse of the resulting expected matrix.
# Shape of `expected` and `actual`:
# [ (x1, y1),
# (x2, y2),
# (x3, y3) ]
ex = np.array([
list(point) + [1]
for point in expected
]).transpose()
ac = np.array([
list(point) + [1]
for point in actual
]).transpose()
# Shape of `ex` and `ac`:
# [ x1 x2 x3 ]
# [ y1 y2 y3 ]
# [ 1 1 1 ]
transform = np.dot(ac, inv(ex))
# `dot` in numpy is a misnomer. When both arguments are square 2-D
# arrays, the result is a matrix multiplication rather than an
# element-wise dot product (so the return here is a 3x3 matrix).
return transform
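# Minimal usage sketch (illustrative only), reusing the point sets from the
# docstring above. Note that, as implemented, the returned matrix is
# dot(actual, inv(expected)), i.e. applying it to an expected point yields the
# corresponding measured point.
def _example_solve_usage() -> np.ndarray:
    expected = [(1.0, 1.0), (2.0, 2.0), (1.0, 2.0)]
    actual = [(1.1, 1.1), (2.1, 2.1), (1.1, 2.1)]
    # For this pure +0.1/+0.1 shift the result is (up to rounding):
    # [[1, 0, 0.1], [0, 1, 0.1], [0, 0, 1]]
    return solve(expected, actual)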
def add_z(xy: np.ndarray, z: float) -> np.ndarray:
"""
Turn a 2-D transform matrix into a 3-D transform matrix (scale/shift only,
no rotation).
:param xy: A two-dimensional transform matrix (a 3x3 numpy ndarray) in the
following form:
[ 1 0 x ]
[ 0 1 y ]
[ 0 0 1 ]
:param z: a float for the z component
:return: a three-dimensional transformation matrix (a 4x4 numpy ndarray)
with x, y, and z from the function parameters, in the following form:
[ 1 0 0 x ]
[ 0 1 0 y ]
[ 0 0 1 z ]
[ 0 0 0 1 ]
"""
# First, insert a column of zeros into the input matrix
interm = insert(xy, 2, [0, 0, 0], axis=1)
# Result:
# [ 1 0 0 x ]
# [ 0 1 0 y ]
# [ 0 0 0 1 ]
# Then, insert the z row to create a properly formed 3-D transform matrix:
xyz = insert(
interm,
2,
[0, 0, 1, z],
axis=0)
# Result:
# [ 1 0 0 x ]
# [ 0 1 0 y ]
# [ 0 0 1 z ]
# [ 0 0 0 1 ]
return xyz.round(11)
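# Small illustrative sketch of add_z(): lifting an identity 2-D transform with
# a z offset of 3.0 produces the 4x4 matrix shown in the comment below.
def _example_add_z_usage() -> np.ndarray:
    xyz = add_z(np.identity(3), 3.0)
    # [[1, 0, 0, 0],
    #  [0, 1, 0, 0],
    #  [0, 0, 1, 3],
    #  [0, 0, 0, 1]]
    return xyz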
def add_matrices(
t1: Tuple[float, float, float],
t2: Tuple[float, float, float]) -> Tuple[float, float, float]:
"""
Simple method to convert tuples to numpy arrays and add them.
"""
return tuple(np.asarray(t1) + np.asarray(t2)) # type: ignore
def apply_transform(
t: Union[List[List[float]], np.ndarray],
pos: AxisPosition,
with_offsets=True) -> Tuple[float, float, float]:
"""
Change of base using a transform matrix. Primarily used to render a point
in space in a way that is more readable for the user.
:param t: A transformation matrix from one 3D space [A] to another [B]
:param pos: XYZ point in space A
:param with_offsets: Whether to apply the transform as an affine transform
(including its translation offsets, via homogeneous coordinates) or as a
plain linear transform. You might use with_offsets=False when t is a bare
3x3 attitude/rotation matrix with no translation component.
:return: corresponding XYZ point in space B
"""
if with_offsets:
return tuple(dot(t, list(pos) + [1])[:3]) # type: ignore
else:
return tuple(dot(t, list(pos))[:3]) # type: ignore
def apply_reverse(
t: Union[List[List[float]], np.ndarray],
pos: AxisPosition,
with_offsets=True) -> Tuple[float, float, float]:
""" Like apply_transform but inverts the transform first
"""
return apply_transform(inv(t), pos, with_offsets)
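# Illustrative round-trip sketch (arbitrary example values): applying a
# transform and then its inverse recovers the original point.
def _example_apply_transform_roundtrip() -> Tuple[float, float, float]:
    t = add_z(np.identity(3), 3.0)               # pure +3.0 z translation
    moved = apply_transform(t, (1.0, 2.0, 0.5))  # -> (1.0, 2.0, 3.5)
    return apply_reverse(t, moved)               # -> (1.0, 2.0, 0.5)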
|
|
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo_config import cfg
from oslo_log import log
from six.moves.urllib import parse
import webob
from manila.api.openstack import api_version_request as api_version
from manila.api.openstack import versioned_method
from manila.i18n import _
api_common_opts = [
cfg.IntOpt(
'osapi_max_limit',
default=1000,
help='The maximum number of items returned in a single response from '
'a collection resource.'),
cfg.StrOpt(
'osapi_share_base_URL',
help='Base URL to be presented to users in links to the Share API'),
]
CONF = cfg.CONF
CONF.register_opts(api_common_opts)
LOG = log.getLogger(__name__)
# Regex that matches alphanumeric characters, periods, hyphens,
# colons and underscores:
# ^ assert position at start of the string
# [\w\.\-\:\_] match expression
# $ assert position at end of the string
VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE)
def validate_key_names(key_names_list):
"""Validate each item of the list to match key name regex."""
for key_name in key_names_list:
if not VALID_KEY_NAME_REGEX.match(key_name):
return False
return True
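# Quick illustrative check of the regex above (hypothetical metadata keys):
# letters, digits, '.', '-', ':' and '_' are accepted; anything else is not.
def _example_validate_key_names():
    assert validate_key_names(['share.type', 'env:prod', 'owner_id-1'])
    assert not validate_key_names(['bad key!'])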
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_limit_param(request)
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
return params
def _get_limit_param(request):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET['limit'])
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _get_marker_param(request):
"""Extract marker ID from request or fail."""
return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
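# Minimal sketch of limited() using a blank webob request with hypothetical
# query parameters (illustrative only).
def _example_limited_usage():
    request = webob.Request.blank('/shares?offset=2&limit=3')
    return limited(list(range(10)), request)  # -> [2, 3, 4]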
def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
marker = params.get('marker')
limit = min(max_limit, limit)
start_index = 0
if marker:
start_index = -1
for i, item in enumerate(items):
if 'flavorid' in item:
if item['flavorid'] == marker:
start_index = i + 1
break
elif item['id'] == marker or item.get('uuid') == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
range_end = start_index + limit
return items[start_index:range_end]
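# Companion sketch for limited_by_marker() with illustrative data: paging
# resumes just after the item whose id matches the marker.
def _example_limited_by_marker_usage():
    items = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}, {'id': 'd'}]
    request = webob.Request.blank('/shares?marker=b&limit=2')
    return limited_by_marker(items, request)  # -> [{'id': 'c'}, {'id': 'd'}]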
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://www.manila.com/v1.1/123'
Returns: 'http://www.manila.com/123'
Given: 'http://www.manila.com/v1.1'
Returns: 'http://www.manila.com'
"""
parsed_url = parse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if expression.match(url_parts[1]):
del url_parts[1]
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = 'href %s does not contain version' % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return parse.urlunsplit(parsed_url)
def dict_to_query_str(params):
# TODO(throughnothing): we should just use urllib.urlencode instead of this
# But currently we don't work with urlencoded URLs
param_str = ""
for key, val in params.items():
param_str = param_str + '='.join([str(key), str(val)]) + '&'
return param_str.rstrip('&')
class ViewBuilder(object):
"""Model API responses as dictionaries."""
_collection_name = None
_detail_version_modifiers = []
def _get_links(self, request, identifier):
return [{"rel": "self",
"href": self._get_href_link(request, identifier), },
{"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier), }]
def _get_next_link(self, request, identifier):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_link_prefix(request.application_url,
CONF.osapi_share_base_URL)
url = os.path.join(prefix,
request.environ["manila.context"].project_id,
self._collection_name)
return "%s?%s" % (url, dict_to_query_str(params))
def _get_href_link(self, request, identifier):
"""Return an href string pointing to this object."""
prefix = self._update_link_prefix(request.application_url,
CONF.osapi_share_base_URL)
return os.path.join(prefix,
request.environ["manila.context"].project_id,
self._collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_link_prefix(base_url,
CONF.osapi_share_base_URL)
return os.path.join(base_url,
request.environ["manila.context"].project_id,
self._collection_name,
str(identifier))
def _get_collection_links(self, request, items, id_key="uuid"):
"""Retrieve 'next' link, if applicable."""
links = []
limit = int(request.params.get("limit", 0))
if limit and limit == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
else:
last_item_id = last_item["id"]
links.append({
"rel": "next",
"href": self._get_next_link(request, last_item_id),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(parse.urlsplit(orig_url))
prefix_parts = list(parse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
return parse.urlunsplit(url_parts)
def update_versioned_resource_dict(self, request, resource_dict, resource):
"""Updates the given resource dict for the given request version.
This method calls every method, that is applicable to the request
version, in _detail_version_modifiers.
"""
for method_name in self._detail_version_modifiers:
method = getattr(self, method_name)
if request.api_version_request.matches_versioned_method(method):
request_context = request.environ['manila.context']
method.func(self, request_context, resource_dict, resource)
@classmethod
def versioned_method(cls, min_ver, max_ver=None, experimental=False):
"""Decorator for versioning API methods.
:param min_ver: string representing minimum version
:param max_ver: optional string representing maximum version
:param experimental: flag indicating an API is experimental and is
subject to change or removal at any time
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, experimental, f)
return new_func
return decorator
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
LOG.debug("Removing options '%(bad_options)s' from query",
{"bad_options": bad_options})
for opt in unknown_options:
del search_options[opt]
|
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
import Simulation
import CacheConfig
from Caches import *
from cpu2000 import *
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
idx = 0
for wrkld in workloads:
process = LiveProcess()
process.executable = wrkld
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
return multiprocesses, idx
else:
return multiprocesses, 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
multiprocesses = []
numThreads = 1
if options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print "number of benchmarks not equal to set num_cpus!"
sys.exit(1)
for app in apps:
try:
if buildEnv['TARGET_ISA'] == 'alpha':
exec("workload = %s('alpha', 'tru64', 'ref')" % app)
else:
exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
multiprocesses.append(workload.makeLiveProcess())
except:
print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
sys.exit(1)
elif options.cmd:
multiprocesses, numThreads = get_processes(options)
else:
print >> sys.stderr, "No workload specified. Exiting!\n"
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.clock = options.clock
CPUClass.numThreads = numThreads
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
physmem = SimpleMemory(range=AddrRange("512MB")),
membus = CoherentBus(), mem_mode = test_mem_mode)
# Sanity check
if options.fastmem and (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.fastmem:
system.cpu[i].fastmem = True
if options.checker:
system.cpu[i].addCheckerCpu()
if options.ruby:
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
#sys.exit(1)
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ruby_ports[i]
# Create the interrupt controller and connect its ports to Ruby
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
CacheConfig.config_cache(options, system)
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
|
|
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import anyconfig
from molecule import interpolation
from molecule import logger
from molecule import platforms
from molecule import scenario
from molecule import state
from molecule import util
from molecule.dependency import ansible_galaxy
from molecule.dependency import gilt
from molecule.driver import delegated
from molecule.driver import docker
from molecule.driver import ec2
from molecule.driver import gce
from molecule.driver import lxc
from molecule.driver import lxd
from molecule.driver import kvm
from molecule.driver import openstack
from molecule.driver import vagrant
from molecule.lint import yamllint
from molecule.model import schema
from molecule.provisioner import ansible
from molecule.verifier import goss
from molecule.verifier import testinfra
LOG = logger.get_logger(__name__)
MOLECULE_DIRECTORY = 'molecule'
MOLECULE_FILE = 'molecule.yml'
MERGE_STRATEGY = anyconfig.MS_DICTS
class Config(object):
"""
Molecule searches the current directory for `molecule.yml` files by
globbing `molecule/*/molecule.yml`. The files are instantiated into
a list of Molecule :class:`.Config` objects, and each Molecule subcommand
operates on this list.
The directory in which the `molecule.yml` resides is the Scenario's
directory. Molecule performs most functions within this directory.
The :class:`.Config` object has instantiated Dependency_, Driver_,
:ref:`root_lint`, Platforms_, Provisioner_, Verifier_,
:ref:`root_scenario`, and State_ references.
"""
def __init__(self,
molecule_file,
args={},
command_args={},
ansible_args=()):
"""
Initialize a new config class and return None.
:param molecule_file: A string containing the path to the Molecule file
to be parsed.
:param args: An optional dict of options, arguments and commands from
the CLI.
:param command_args: An optional dict of options passed to the
subcommand from the CLI.
:param ansible_args: An optional tuple of arguments provided to the
`ansible-playbook` command.
:returns: None
"""
self.molecule_file = molecule_file
self.args = args
self.command_args = command_args
self.ansible_args = ansible_args
self.config = self._combine()
@property
def debug(self):
return self.args.get('debug', False)
@property
def subcommand(self):
return self.command_args['subcommand']
@property
def ephemeral_directory(self):
return os.path.join(self.scenario.directory, '.molecule')
@property
def project_directory(self):
return os.getcwd()
@property
def molecule_directory(self):
return molecule_directory(self.project_directory)
@property
def dependency(self):
dependency_name = self.config['dependency']['name']
if dependency_name == 'galaxy':
return ansible_galaxy.AnsibleGalaxy(self)
elif dependency_name == 'gilt':
return gilt.Gilt(self)
else:
util.exit_with_invalid_section('dependency', dependency_name)
@property
def driver(self):
driver_name = self._get_driver_name()
driver = None
if driver_name == 'delegated':
driver = delegated.Delegated(self)
elif driver_name == 'docker':
driver = docker.Docker(self)
elif driver_name == 'ec2':
driver = ec2.Ec2(self)
elif driver_name == 'gce':
driver = gce.Gce(self)
elif driver_name == 'lxc':
driver = lxc.Lxc(self)
elif driver_name == 'lxd':
driver = lxd.Lxd(self)
elif driver_name == 'kvm':
driver = kvm.kvm(self)
elif driver_name == 'openstack':
driver = openstack.Openstack(self)
elif driver_name == 'vagrant':
driver = vagrant.Vagrant(self)
else:
util.exit_with_invalid_section('driver', driver_name)
driver.name = driver_name
return driver
@property
def drivers(self):
return molecule_drivers()
@property
def env(self):
return {
'MOLECULE_DEBUG': str(self.debug),
'MOLECULE_FILE': self.molecule_file,
'MOLECULE_INVENTORY_FILE': self.provisioner.inventory_file,
'MOLECULE_EPHEMERAL_DIRECTORY': self.scenario.ephemeral_directory,
'MOLECULE_SCENARIO_DIRECTORY': self.scenario.directory,
'MOLECULE_INSTANCE_CONFIG': self.driver.instance_config,
'MOLECULE_DEPENDENCY_NAME': self.dependency.name,
'MOLECULE_DRIVER_NAME': self.driver.name,
'MOLECULE_LINT_NAME': self.lint.name,
'MOLECULE_PROVISIONER_NAME': self.provisioner.name,
'MOLECULE_SCENARIO_NAME': self.scenario.name,
'MOLECULE_VERIFIER_NAME': self.verifier.name,
}
@property
def lint(self):
lint_name = self.config['lint']['name']
if lint_name == 'yamllint':
return yamllint.Yamllint(self)
else:
util.exit_with_invalid_section('lint', lint_name)
@property
def platforms(self):
return platforms.Platforms(self)
@property
def provisioner(self):
provisioner_name = self.config['provisioner']['name']
if provisioner_name == 'ansible':
return ansible.Ansible(self)
else:
util.exit_with_invalid_section('provisioner', provisioner_name)
@property
def scenario(self):
return scenario.Scenario(self)
@property
def state(self):
return state.State(self)
@property
def verifier(self):
verifier_name = self.config['verifier']['name']
if verifier_name == 'testinfra':
return testinfra.Testinfra(self)
elif verifier_name == 'goss':
return goss.Goss(self)
else:
util.exit_with_invalid_section('verifier', verifier_name)
@property
def verifiers(self):
return molecule_verifiers()
def merge_dicts(self, a, b):
return merge_dicts(a, b)
def _get_driver_name(self):
driver_from_state_file = self.state.driver
driver_from_cli = self.command_args.get('driver_name')
if driver_from_state_file:
driver_name = driver_from_state_file
elif driver_from_cli:
driver_name = driver_from_cli
else:
driver_name = self.config['driver']['name']
if driver_from_cli and (driver_from_cli != driver_name):
msg = ("Instance(s) were created with the '{}' driver, but the "
"subcommand is using '{}' driver.").format(
driver_name, driver_from_cli)
util.sysexit_with_message(msg)
return driver_name
def _combine(self):
"""
Perform a prioritized recursive merge of the `molecule_file` with
defaults, interpolate the result with environment variables, and
returns a new dict.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
base = self._get_defaults()
with util.open_file(self.molecule_file) as stream:
interpolated_config = i.interpolate(stream.read())
base = self.merge_dicts(base, util.safe_load(interpolated_config))
schema.validate(base)
return base
def _get_defaults(self):
return {
'dependency': {
'name': 'galaxy',
'enabled': True,
'options': {},
'env': {},
},
'driver': {
'name': 'docker',
'provider': {
'name': None
},
'options': {
'managed': True,
},
'ssh_connection_options': [],
'safe_files': [],
},
'lint': {
'name': 'yamllint',
'enabled': True,
'options': {},
'env': {},
},
'platforms': [],
'provisioner': {
'name': 'ansible',
'config_options': {},
'connection_options': {},
'options': {},
'env': {},
'inventory': {
'host_vars': {},
'group_vars': {},
'links': {},
},
'children': {},
'playbooks': {
'create': 'create.yml',
'converge': 'playbook.yml',
'destroy': 'destroy.yml',
'side_effect': None,
},
'lint': {
'name': 'ansible-lint',
'enabled': True,
'options': {},
'env': {},
},
},
'scenario': {
'name':
'default',
'check_sequence': [
'destroy',
'create',
'converge',
'check',
'destroy',
],
'converge_sequence': [
'create',
'converge',
],
'destroy_sequence': [
'destroy',
],
'test_sequence': [
'destroy',
'dependency',
'syntax',
'create',
'converge',
'idempotence',
'lint',
'side_effect',
'verify',
'destroy',
],
},
'verifier': {
'name': 'testinfra',
'enabled': True,
'directory': 'tests',
'options': {},
'env': {},
'additional_files_or_dirs': [],
'lint': {
'name': 'flake8',
'enabled': True,
'options': {},
'env': {},
},
},
}
def merge_dicts(a, b):
"""
Merges the values of B into A (mutating A) and returns the merged dict. Uses the same
merge strategy as ``config._combine``.
::
dict a
b:
- c: 0
- c: 2
d:
e: "aaa"
f: 3
dict b
a: 1
b:
- c: 3
d:
e: "bbb"
Will give an object such as::
{'a': 1, 'b': [{'c': 3}], 'd': {'e': "bbb", 'f': 3}}
:param a: the target dictionary
:param b: the dictionary to import
:return: dict
"""
conf = a
anyconfig.merge(a, b, ac_merge=MERGE_STRATEGY)
return conf
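# Tiny illustrative sketch of merge_dicts(), using the dictionaries from the
# docstring above: nested dicts are merged, scalars and lists are replaced.
def _example_merge_dicts_usage():
    a = {'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}}
    b = {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}}
    return merge_dicts(a, b)
    # -> {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb', 'f': 3}}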
def molecule_directory(path):
return os.path.join(path, MOLECULE_DIRECTORY)
def molecule_file(path):
return os.path.join(path, MOLECULE_FILE)
def molecule_drivers():
return [
delegated.Delegated(None).name,
docker.Docker(None).name,
ec2.Ec2(None).name,
gce.Gce(None).name,
lxc.Lxc(None).name,
kvm.kvm(None).name,
lxd.Lxd(None).name,
openstack.Openstack(None).name,
vagrant.Vagrant(None).name,
]
def molecule_verifiers():
return [goss.Goss(None).name, testinfra.Testinfra(None).name]
|
|
# Copyright (C) 2015 Junzi Sun (TU Delft)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""ADS-B Wrapper.
The ADS-B wrapper also imports functions from the following modules:
- pyModeS.decoder.bds.bds05
Functions: ``airborne_position``, ``airborne_position_with_ref``, ``altitude``
- pyModeS.decoder.bds.bds06
Functions: ``surface_position``, ``surface_position_with_ref``, ``surface_velocity``
- pyModeS.decoder.bds.bds08
Functions: ``category``, ``callsign``
- pyModeS.decoder.bds.bds09
Functions: ``airborne_velocity``, ``altitude_diff``
"""
from __future__ import absolute_import, print_function, division
import pyModeS as pms
from pyModeS.decoder import common
from pyModeS.decoder import uncertainty
# from pyModeS.decoder.bds import bds05, bds06, bds09
from pyModeS.decoder.bds.bds05 import airborne_position, airborne_position_with_ref, altitude
from pyModeS.decoder.bds.bds06 import surface_position, surface_position_with_ref, surface_velocity
from pyModeS.decoder.bds.bds08 import category, callsign
from pyModeS.decoder.bds.bds09 import airborne_velocity, altitude_diff
def df(msg):
return common.df(msg)
def icao(msg):
return common.icao(msg)
def typecode(msg):
return common.typecode(msg)
def position(msg0, msg1, t0, t1, lat_ref=None, lon_ref=None):
"""Decode position from a pair of even and odd position message
(works with both airborne and surface position messages)
Args:
msg0 (string): even message (28 bytes hexadecimal string)
msg1 (string): odd message (28 bytes hexadecimal string)
t0 (int): timestamp for the even message
t1 (int): timestamp for the odd message
Returns:
(float, float): (latitude, longitude) of the aircraft
"""
tc0 = typecode(msg0)
tc1 = typecode(msg1)
if (5<=tc0<=8 and 5<=tc1<=8):
if (not lat_ref) or (not lon_ref):
raise RuntimeError("Surface position encountered, a reference \
position lat/lon required. Location of \
receiver can be used.")
else:
return surface_position(msg0, msg1, t0, t1, lat_ref, lon_ref)
elif (9<=tc0<=18 and 9<=tc1<=18):
# Airborne position with barometric height
return airborne_position(msg0, msg1, t0, t1)
elif (20<=tc0<=22 and 20<=tc1<=22):
# Airborne position with GNSS height
return airborne_position(msg0, msg1, t0, t1)
else:
raise RuntimeError("incorrect or inconsistant message types")
def position_with_ref(msg, lat_ref, lon_ref):
"""Decode position with only one message,
knowing reference nearby location, such as previously
calculated location, ground station, or airport location, etc.
Works with both airborne and surface position messages.
The reference position shall be with in 180NM (airborne) or 45NM (surface)
of the true position.
Args:
msg (string): even message (28 bytes hexadecimal string)
lat_ref: previous known latitude
lon_ref: previous known longitude
Returns:
(float, float): (latitude, longitude) of the aircraft
"""
tc = typecode(msg)
if 5<=tc<=8:
return surface_position_with_ref(msg, lat_ref, lon_ref)
elif 9<=tc<=18 or 20<=tc<=22:
return airborne_position_with_ref(msg, lat_ref, lon_ref)
else:
raise RuntimeError("incorrect or inconsistant message types")
def altitude(msg):
"""Decode aircraft altitude
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: altitude in feet
"""
tc = typecode(msg)
if tc<5 or tc==19 or tc>22:
raise RuntimeError("%s: Not a position message" % msg)
if tc>=5 and tc<=8:
# surface position, altitude 0
return 0
msgbin = common.hex2bin(msg)
# Q-bit (bit 48) set indicates 25 ft altitude encoding
q = msgbin[47]
if q == "1":
n = common.bin2int(msgbin[40:47]+msgbin[48:52])
alt = n * 25 - 1000
return alt
else:
return None
def velocity(msg):
"""Calculate the speed, heading, and vertical rate
(handles both airborne or surface message)
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
(int, float, int, string): speed (kt), ground track or heading (degree),
rate of climb/descend (ft/min), and speed type
('GS' for ground speed, 'AS' for airspeed)
"""
if 5 <= typecode(msg) <= 8:
return surface_velocity(msg)
elif typecode(msg) == 19:
return airborne_velocity(msg)
else:
raise RuntimeError("incorrect or inconsistant message types, expecting 4<TC<9 or TC=19")
def speed_heading(msg):
"""Get speed and ground track (or heading) from the velocity message
(handles both airborne or surface message)
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
(int, float): speed (kt), ground track or heading (degree)
"""
spd, trk_or_hdg, rocd, tag = velocity(msg)
return spd, trk_or_hdg
def oe_flag(msg):
"""Check the odd/even flag. Bit 54, 0 for even, 1 for odd.
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: 0 or 1, for even or odd frame
"""
msgbin = common.hex2bin(msg)
return int(msgbin[53])
def version(msg):
"""ADS-B Version
Args:
msg (string): 28 bytes hexadecimal message string, TC = 31
Returns:
int: version number
"""
tc = typecode(msg)
if tc != 31:
raise RuntimeError("%s: Not a status operation message, expecting TC = 31" % msg)
msgbin = common.hex2bin(msg)
version = common.bin2int(msgbin[72:75])
return version
def nuc_p(msg):
"""Calculate NUCp, Navigation Uncertainty Category - Position (ADS-B version 1)
Args:
msg (string): 28 bytes hexadecimal message string,
Returns:
int: Horizontal Protection Limit
int: 95% Containment Radius - Horizontal (meters)
int: 95% Containment Radius - Vertical (meters)
"""
tc = typecode(msg)
if tc < 5 or tc > 22:
raise RuntimeError(
"%s: Not a surface position message (5<TC<8), \
airborne position message (8<TC<19), \
or airborne position with GNSS height (20<TC<22)" % msg
)
try:
NUCp = uncertainty.TC_NUCp_lookup[tc]
HPL = uncertainty.NUCp[NUCp]['HPL']
RCu = uncertainty.NUCp[NUCp]['RCu']
RCv = uncertainty.NUCp[NUCp]['RCv']
except KeyError:
HPL, RCu, RCv = uncertainty.NA, uncertainty.NA, uncertainty.NA
if tc in [20, 21]:
RCv = uncertainty.NA
return HPL, RCu, RCv
def nuc_v(msg):
"""Calculate NUCv, Navigation Uncertainty Category - Velocity (ADS-B version 1)
Args:
msg (string): 28 bytes hexadecimal message string,
Returns:
int or string: 95% Horizontal Velocity Error
int or string: 95% Vertical Velocity Error
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NUCv = common.bin2int(msgbin[42:45])
try:
HVE = uncertainty.NUCv[NUCv]['HVE']
VVE = uncertainty.NUCv[NUCv]['VVE']
except KeyError:
HVE, VVE = uncertainty.NA, uncertainty.NA
return HVE, VVE
def nic_v1(msg, NICs):
"""Calculate NIC, navigation integrity category, for ADS-B version 1
Args:
msg (string): 28 bytes hexadecimal message string
NICs (int or string): NIC supplement
Returns:
int or string: Horizontal Radius of Containment
int or string: Vertical Protection Limit
"""
if typecode(msg) < 5 or typecode(msg) > 22:
raise RuntimeError(
"%s: Not a surface position message (5<TC<8), \
airborne position message (8<TC<19), \
or airborne position with GNSS height (20<TC<22)" % msg
)
tc = typecode(msg)
NIC = uncertainty.TC_NICv1_lookup[tc]
if isinstance(NIC, dict):
NIC = NIC[NICs]
try:
Rc = uncertainty.NICv1[NIC][NICs]['Rc']
VPL = uncertainty.NICv1[NIC][NICs]['VPL']
except KeyError:
Rc, VPL = uncertainty.NA, uncertainty.NA
return Rc, VPL
def nic_v2(msg, NICa, NICbc):
"""Calculate NIC, navigation integrity category, for ADS-B version 2
Args:
msg (string): 28 bytes hexadecimal message string
NICa (int or string): NIC supplement - A
NICbc (int or string): NIC supplement - B or C
Returns:
int or string: Horizontal Radius of Containment
"""
if typecode(msg) < 5 or typecode(msg) > 22:
raise RuntimeError(
"%s: Not a surface position message (5<TC<8), \
airborne position message (8<TC<19), \
or airborne position with GNSS height (20<TC<22)" % msg
)
tc = typecode(msg)
NIC = uncertainty.TC_NICv2_lookup[tc]
if 20<=tc<=22:
NICs = 0
else:
NICs = NICa*2 + NICbc
try:
if isinstance(NIC, dict):
NIC = NIC[NICs]
Rc = uncertainty.NICv2[NIC][NICs]['Rc']
except KeyError:
Rc = uncertainty.NA
return Rc
def nic_s(msg):
"""Obtain NIC supplement bit, TC=31 message
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: NICs number (0 or 1)
"""
tc = typecode(msg)
if tc != 31:
raise RuntimeError("%s: Not a status operation message, expecting TC = 31" % msg)
msgbin = common.hex2bin(msg)
nic_s = int(msgbin[75])
return nic_s
def nic_a_c(msg):
"""Obtain NICa/c, navigation integrity category supplements a and c
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
(int, int): NICa and NICc number (0 or 1)
"""
tc = typecode(msg)
if tc != 31:
raise RuntimeError("%s: Not a status operation message, expecting TC = 31" % msg)
msgbin = common.hex2bin(msg)
nic_a = int(msgbin[75])
nic_c = int(msgbin[51])
return nic_a, nic_c
def nic_b(msg):
"""Obtain NICb, navigation integrity category supplement-b
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: NICb number (0 or 1)
"""
tc = typecode(msg)
if tc < 9 or tc > 18:
raise RuntimeError("%s: Not a airborne position message, expecting 8<TC<19" % msg)
msgbin = common.hex2bin(msg)
nic_b = int(msgbin[39])
return nic_b
def nac_p(msg):
"""Calculate NACp, Navigation Accuracy Category - Position
Args:
msg (string): 28 bytes hexadecimal message string, TC = 29 or 31
Returns:
int or string: 95% horizontal accuracy bounds, Estimated Position Uncertainty
int or string: 95% vertical accuracy bounds, Vertical Estimated Position Uncertainty
"""
tc = typecode(msg)
if tc not in [29, 31]:
raise RuntimeError("%s: Not a target state and status message, \
or operation status message, expecting TC = 29 or 31" % msg)
msgbin = common.hex2bin(msg)
if tc == 29:
NACp = common.bin2int(msgbin[71:75])
elif tc == 31:
NACp = common.bin2int(msgbin[76:80])
try:
EPU = uncertainty.NACp[NACp]['EPU']
VEPU = uncertainty.NACp[NACp]['VEPU']
except KeyError:
EPU, VEPU = uncertainty.NA, uncertainty.NA
return EPU, VEPU
def nac_v(msg):
"""Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NACv = common.bin2int(msgbin[42:45])
try:
HFOMr = uncertainty.NACv[NACv]['HFOMr']
VFOMr = uncertainty.NACv[NACv]['VFOMr']
except KeyError:
HFOMr, VFOMr = uncertainty.NA, uncertainty.NA
return HFOMr, VFOMr
def sil(msg, version):
"""Calculate SIL, Surveillance Integrity Level
Args:
msg (string): 28 bytes hexadecimal message string with TC = 29, 31
Returns:
int or string: Probability of exceeding Horizontal Radius of Containment RCu
int or string: Probability of exceeding Vertical Integrity Containment Region VPL
string: SIL supplement based on per "hour" or "sample", or 'unknown'
"""
tc = typecode(msg)
if tc not in [29, 31]:
raise RuntimeError("%s: Not a target state and status messag, \
or operation status message, expecting TC = 29 or 31" % msg)
msgbin = common.hex2bin(msg)
if tc == 29:
SIL = common.bin2int(msgbin[76:78])
elif tc == 31:
SIL = common.bin2int(msgbin[82:84])
try:
PE_RCu = uncertainty.SIL[SIL]['PE_RCu']
PE_VPL = uncertainty.SIL[SIL]['PE_VPL']
except KeyError:
PE_RCu, PE_VPL = uncertainty.NA, uncertainty.NA
base = 'unknown'
if version == 2:
if tc == 29:
SIL_SUP = common.bin2int(msgbin[39])
elif tc == 31:
SIL_SUP = common.bin2int(msgbin[86])
if SIL_SUP == 0:
base = "hour"
elif SIL_SUP == 1:
base = "sample"
return PE_RCu, PE_VPL, base
|
|
#!/usr/bin/env python
import eventlet
import errno
import imp
import logging
import os
import os.path
import sys
from daemon.daemon import DaemonContext
from daemon.runner import DaemonRunner, make_pidlockfile
from django.conf import settings as django_settings
from django.core.management import call_command
from eventlet import wsgi
from optparse import OptionParser
from sentry import VERSION
def settings_from_file(filename, silent=False):
"""
Configures django settings from an arbitrary (non sys.path) filename.
"""
mod = imp.new_module('config')
mod.__file__ = filename
try:
execfile(filename, mod.__dict__)
except IOError, e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
if not django_settings.configured:
django_settings.configure()
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(django_settings, setting, setting_value)
class SentryServer(DaemonRunner):
pidfile_timeout = 10
start_message = u"started with pid %(pid)d"
def __init__(self, host=None, port=None, pidfile=None,
logfile=None, daemonize=False, debug=False):
from sentry.conf import settings
if not logfile:
logfile = settings.WEB_LOG_FILE
logfile = os.path.realpath(logfile)
pidfile = os.path.realpath(pidfile or settings.WEB_PID_FILE)
if daemonize:
detach_process = True
else:
detach_process = False
self.daemon_context = DaemonContext(detach_process=detach_process)
self.daemon_context.stdout = open(logfile, 'w+')
self.daemon_context.stderr = open(logfile, 'w+', buffering=0)
self.debug = debug
self.pidfile = make_pidlockfile(pidfile, self.pidfile_timeout)
self.daemon_context.pidfile = self.pidfile
self.host = host or settings.WEB_HOST
self.port = port or settings.WEB_PORT
# HACK: set app to self so self.app.run() works
self.app = self
def execute(self, action):
self.action = action
# Upgrade needs to happen before forking
upgrade()
if self.daemon_context.detach_process is False and self.action == 'start':
# HACK:
self.run()
else:
self.do_action()
def run(self):
from sentry.wsgi import application
def inner_run():
wsgi.server(eventlet.listen((self.host, self.port)), application)
if self.debug:
from django.utils import autoreload
autoreload.main(inner_run)
else:
inner_run()
def cleanup(days=30, logger=None, site=None, server=None, level=None):
"""
Deletes a portion of the trailing data in Sentry based on
their creation dates. For example, if ``days`` is 30, this
would attempt to clean up all data that is older than 30 days.
:param logger: limit all deletion scopes to messages from the
specified logger.
:param site: limit the message deletion scope to the specified
site.
:param server: limit the message deletion scope to the specified
server.
:param level: limit all deletion scopes to messages that are greater
than or equal to level.
"""
# TODO: we should collect which messages above were deleted
# and potentially just send out post_delete signals where
# GroupedMessage can update itself accordingly
from sentry.models import GroupedMessage, Message, MessageCountByMinute, \
MessageFilterValue, FilterValue
from sentry.utils.query import RangeQuerySetWrapper, SkinnyQuerySet
import datetime
ts = datetime.datetime.now() - datetime.timedelta(days=days)
# Message
qs = SkinnyQuerySet(Message).filter(datetime__lte=ts)
if logger:
qs = qs.filter(logger=logger)
if site:
qs = qs.filter(site=site)
if server:
qs = qs.filter(server_name=server)
if level:
qs = qs.filter(level__gte=level)
groups_to_check = set()
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
groups_to_check.add(obj.group_id)
if not (server or site):
# MessageCountByMinute
qs = SkinnyQuerySet(MessageCountByMinute).filter(date__lte=ts)
if logger:
qs = qs.filter(group__logger=logger)
if level:
qs = qs.filter(group__level__gte=level)
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
# GroupedMessage
qs = SkinnyQuerySet(GroupedMessage).filter(last_seen__lte=ts)
if logger:
qs = qs.filter(logger=logger)
if level:
qs = qs.filter(level__gte=level)
for obj in RangeQuerySetWrapper(qs):
for key, value in SkinnyQuerySet(MessageFilterValue).filter(group=obj).values_list('key', 'value'):
if not MessageFilterValue.objects.filter(key=key, value=value).exclude(group=obj).exists():
print ">>> Removing <FilterValue: key=%s, value=%s>" % (key, value)
FilterValue.objects.filter(key=key, value=value).delete()
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
# attempt to cleanup any groups that may now be empty
groups_to_delete = []
for group_id in groups_to_check:
if not Message.objects.filter(group=group_id).exists():
groups_to_delete.append(group_id)
if groups_to_delete:
for obj in SkinnyQuerySet(GroupedMessage).filter(pk__in=groups_to_delete):
for key, value in SkinnyQuerySet(MessageFilterValue).filter(group=obj).values_list('key', 'value'):
if not MessageFilterValue.objects.filter(key=key, value=value).exclude(group=obj).exists():
print ">>> Removing <FilterValue: key=%s, value=%s>" % (key, value)
FilterValue.objects.filter(key=key, value=value).delete()
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
def upgrade(interactive=True):
from sentry.conf import settings
call_command('syncdb', database=settings.DATABASE_USING or 'default', interactive=interactive)
if 'south' in django_settings.INSTALLED_APPS:
call_command('migrate', database=settings.DATABASE_USING or 'default', interactive=interactive)
def main():
command_list = ('start', 'stop', 'restart', 'cleanup', 'upgrade')
args = sys.argv
if len(args) < 2 or args[1] not in command_list:
print "usage: sentry [command] [options]"
print
print "Available subcommands:"
for cmd in command_list:
print " ", cmd
sys.exit(1)
parser = OptionParser(version="%%prog %s" % VERSION)
parser.add_option('--config', metavar='CONFIG')
if args[1] == 'start':
parser.add_option('--host', metavar='HOSTNAME')
parser.add_option('--port', type=int, metavar='PORT')
parser.add_option('--daemon', action='store_true', default=False, dest='daemonize')
parser.add_option('--no-daemon', action='store_false', default=False, dest='daemonize')
parser.add_option('--debug', action='store_true', default=False, dest='debug')
parser.add_option('--pidfile', dest='pidfile')
parser.add_option('--logfile', dest='logfile')
elif args[1] == 'stop':
parser.add_option('--pidfile', dest='pidfile')
parser.add_option('--logfile', dest='logfile')
elif args[1] == 'cleanup':
parser.add_option('--days', default='30', type=int,
help='Numbers of days to truncate on.')
parser.add_option('--logger',
help='Limit truncation to only entries from logger.')
parser.add_option('--site',
help='Limit truncation to only entries from site.')
parser.add_option('--server',
help='Limit truncation to only entries from server.')
parser.add_option('--level',
help='Limit truncation to only entries greater than or equal to level (e.g. DEBUG).')
(options, args) = parser.parse_args()
# Install default server values
if not django_settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
if options.config:
# assumed to be a file
settings_from_file(options.config)
else:
config_path = os.path.expanduser(os.path.join('~', '.sentry', 'sentry.conf.py'))
if os.path.exists(config_path):
settings_from_file(config_path)
if getattr(options, 'debug', False):
django_settings.DEBUG = True
if args[0] == 'upgrade':
upgrade()
elif args[0] == 'start':
app = SentryServer(host=options.host, port=options.port,
pidfile=options.pidfile, logfile=options.logfile,
daemonize=options.daemonize, debug=options.debug)
app.execute(args[0])
elif args[0] == 'restart':
app = SentryServer()
app.execute(args[0])
elif args[0] == 'stop':
app = SentryServer(pidfile=options.pidfile, logfile=options.logfile)
app.execute(args[0])
elif args[0] == 'cleanup':
level = options.level
if level is not None and not level.isdigit():
level = getattr(logging, level.upper())
cleanup(days=options.days, logger=options.logger, site=options.site, server=options.server,
level=level)
sys.exit(0)
if __name__ == '__main__':
main()
|
|
"""
Test the pipeline module.
"""
from tempfile import mkdtemp
import shutil
import time
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals.joblib import Memory
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X):
return X
class Transf(NoInvTransf):
def transform(self, X):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
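    # Alias the remaining prediction-style methods to predict so this mock can
    # stand in for any final-estimator interface exercised by the tests below.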
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
class DummyTransf(Transf):
"""Transformer which store the column means"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = assert_no_warnings(clone, pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_init_tuple():
# Pipeline accepts steps as tuple
X = np.array([[1, 2]])
pipe = Pipeline((('transf', Transf()), ('clf', FitParamT())))
pipe.fit(X, y=None)
pipe.score(X)
pipe.set_params(transf=None)
pipe.fit(X, y=None)
pipe.score(X)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
# invalid parameters should raise an error message
assert_raise_message(
TypeError,
"fit() got an unexpected keyword argument 'bad'",
pipe.fit, None, None, clf__bad=True
)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, y=None), 3)
assert_equal(pipe.score(X, y=None, sample_weight=None), 3)
assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8)
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, sample_weight=None), 3)
assert_raise_message(
TypeError,
"score() got an unexpected keyword argument 'sample_weight'",
pipe.score, X, sample_weight=np.array([2, 3])
)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([
('scaler', scaler_for_pipeline),
('Kmeans', km_for_pipeline)
])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
pipe.fit_predict(X=None,
y=None,
transf__should_get_this=True,
clf__should_succeed=True)
assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
assert_true(pipe.named_steps['clf'].successful)
assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# Test clone
fs2 = assert_no_warnings(clone, fs)
assert_false(fs.transformer_list[0][1] is fs2.transformer_list[0][1])
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
# test that init accepts tuples
fs = FeatureUnion((("svd", svd), ("select", select)))
fs.fit(X, y)
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_make_union_kwargs():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock, n_jobs=3)
assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list)
assert_equal(3, fu.n_jobs)
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
'Unknown keyword arguments: "transformer_weights"',
make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1}
)
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_pipeline_named_steps():
transf = Transf()
mult2 = Mult(mult=2)
pipeline = Pipeline([('mock', transf), ("mult", mult2)])
# Test access via named_steps bunch object
assert_true('mock' in pipeline.named_steps)
assert_true('mock2' not in pipeline.named_steps)
assert_true(pipeline.named_steps.mock is transf)
assert_true(pipeline.named_steps.mult is mult2)
    # Test bunch with a step name that conflicts with a dict method
pipeline = Pipeline([('values', transf), ("mult", mult2)])
assert_true(pipeline.named_steps.values is not transf)
assert_true(pipeline.named_steps.mult is mult2)
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'memory': None,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
assert_raise_message(
TypeError,
'Unknown keyword arguments: "random_parameter"',
make_pipeline, t1, t2, random_parameter='rnd'
)
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Estimator names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Estimator names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
def test_pipeline_wrong_memory():
# Test that an error is raised when memory is not a string or a Memory
# instance
iris = load_iris()
X = iris.data
y = iris.target
# Define memory as an integer
memory = 1
cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
memory=memory)
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.externals.joblib.Memory."
" Got memory='1' instead.", cached_pipe.fit, X, y)
class DummyMemory(object):
def cache(self, func):
return func
class WrongDummyMemory(object):
pass
def test_pipeline_with_cache_attribute():
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())],
memory=DummyMemory())
pipe.fit(X, y=None)
dummy = WrongDummyMemory()
pipe = Pipeline([('transf', Transf()), ('clf', Mult())],
memory=dummy)
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.externals.joblib.Memory."
" Got memory='{}' instead.".format(dummy), pipe.fit, X)
def test_pipeline_memory():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
memory = Memory(cachedir=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
# Get the time stamp of the transformer in the cached pipeline
ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_false(hasattr(transf, 'means_'))
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_equal(ts, cached_pipe.named_steps['transf'].timestamp_)
# Create a new pipeline with cloned estimators
        # Check that even changing the step name does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert_equal(ts, cached_pipe_2.named_steps['transf_2'].timestamp_)
finally:
shutil.rmtree(cachedir)
def test_make_pipeline_memory():
cachedir = mkdtemp()
memory = Memory(cachedir=cachedir)
pipeline = make_pipeline(DummyTransf(), SVC(), memory=memory)
assert_true(pipeline.memory is memory)
pipeline = make_pipeline(DummyTransf(), SVC())
assert_true(pipeline.memory is None)
shutil.rmtree(cachedir)
|
|
from functools import partial
from itertools import chain
import pytest
import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import calinski_harabasz_score
from sklearn.metrics.cluster import davies_bouldin_score
from sklearn.utils._testing import assert_allclose
# Dictionaries of metrics
# ------------------------
# These dictionaries provide an easy way to call a particular metric and to
# associate a name with each function:
# - SUPERVISED_METRICS: all supervised cluster metrics - (when given a
# ground truth value)
# - UNSUPERVISED_METRICS: all unsupervised cluster metrics
#
# These dictionaries will be used to systematically test some invariance
# properties, e.g. invariance to several input layouts.
#
SUPERVISED_METRICS = {
"adjusted_mutual_info_score": adjusted_mutual_info_score,
"adjusted_rand_score": adjusted_rand_score,
"rand_score": rand_score,
"completeness_score": completeness_score,
"homogeneity_score": homogeneity_score,
"mutual_info_score": mutual_info_score,
"normalized_mutual_info_score": normalized_mutual_info_score,
"v_measure_score": v_measure_score,
"fowlkes_mallows_score": fowlkes_mallows_score
}
UNSUPERVISED_METRICS = {
"silhouette_score": silhouette_score,
"silhouette_manhattan": partial(silhouette_score, metric='manhattan'),
"calinski_harabasz_score": calinski_harabasz_score,
"davies_bouldin_score": davies_bouldin_score
}
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to systematically test some
# functionalities and invariances, e.g. SYMMETRIC_METRICS lists all metrics
# that are symmetric with respect to their input arguments y_true and y_pred.
#
# --------------------------------------------------------------------
# Symmetric with respect to their input arguments y_true and y_pred.
# Symmetry only applies to supervised clustering metrics.
SYMMETRIC_METRICS = [
"adjusted_rand_score", "rand_score", "v_measure_score",
"mutual_info_score", "adjusted_mutual_info_score",
"normalized_mutual_info_score", "fowlkes_mallows_score"
]
NON_SYMMETRIC_METRICS = ["homogeneity_score", "completeness_score"]
# Metrics whose upper bound is 1
NORMALIZED_METRICS = [
"adjusted_rand_score", "rand_score", "homogeneity_score",
"completeness_score", "v_measure_score", "adjusted_mutual_info_score",
"fowlkes_mallows_score", "normalized_mutual_info_score"
]
rng = np.random.RandomState(0)
y1 = rng.randint(3, size=30)
y2 = rng.randint(3, size=30)
def test_symmetric_non_symmetric_union():
assert (sorted(SYMMETRIC_METRICS + NON_SYMMETRIC_METRICS) ==
sorted(SUPERVISED_METRICS))
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize(
'metric_name, y1, y2',
[(name, y1, y2) for name in SYMMETRIC_METRICS]
)
def test_symmetry(metric_name, y1, y2):
metric = SUPERVISED_METRICS[metric_name]
assert metric(y1, y2) == pytest.approx(metric(y2, y1))
@pytest.mark.parametrize(
'metric_name, y1, y2',
[(name, y1, y2) for name in NON_SYMMETRIC_METRICS]
)
def test_non_symmetry(metric_name, y1, y2):
metric = SUPERVISED_METRICS[metric_name]
assert metric(y1, y2) != pytest.approx(metric(y2, y1))
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize("metric_name", NORMALIZED_METRICS)
def test_normalized_output(metric_name):
upper_bound_1 = [0, 0, 0, 1, 1, 1]
upper_bound_2 = [0, 0, 0, 1, 1, 1]
metric = SUPERVISED_METRICS[metric_name]
assert metric([0, 0, 0, 1, 1], [0, 0, 0, 1, 2]) > 0.0
assert metric([0, 0, 1, 1, 2], [0, 0, 1, 1, 1]) > 0.0
assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0
assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0
assert metric(upper_bound_1, upper_bound_2) == pytest.approx(1.0)
lower_bound_1 = [0, 0, 0, 0, 0, 0]
lower_bound_2 = [0, 1, 2, 3, 4, 5]
score = np.array([metric(lower_bound_1, lower_bound_2),
metric(lower_bound_2, lower_bound_1)])
assert not (score < 0).any()
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize(
"metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS)
)
def test_permute_labels(metric_name):
    # All clustering metrics are invariant under permutation of the labels,
    # e.g. when labels 0 and 1 are exchanged.
y_label = np.array([0, 0, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 1, 0, 1, 1, 0])
if metric_name in SUPERVISED_METRICS:
metric = SUPERVISED_METRICS[metric_name]
score_1 = metric(y_pred, y_label)
assert_allclose(score_1, metric(1 - y_pred, y_label))
assert_allclose(score_1, metric(1 - y_pred, 1 - y_label))
assert_allclose(score_1, metric(y_pred, 1 - y_label))
else:
metric = UNSUPERVISED_METRICS[metric_name]
X = np.random.randint(10, size=(7, 10))
score_1 = metric(X, y_pred)
assert_allclose(score_1, metric(X, 1 - y_pred))
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize(
"metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS)
)
# For all clustering metrics, the input parameters can be given as arrays or
# lists, with positive, negative or string labels.
def test_format_invariance(metric_name):
y_true = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 1, 2, 3, 4, 5, 6, 7]
def generate_formats(y):
y = np.array(y)
yield y, 'array of ints'
yield y.tolist(), 'list of ints'
yield [str(x) + "-a" for x in y.tolist()], 'list of strs'
yield (np.array([str(x) + "-a" for x in y.tolist()], dtype=object),
'array of strs')
yield y - 1, 'including negative ints'
yield y + 1, 'strictly positive ints'
if metric_name in SUPERVISED_METRICS:
metric = SUPERVISED_METRICS[metric_name]
score_1 = metric(y_true, y_pred)
y_true_gen = generate_formats(y_true)
y_pred_gen = generate_formats(y_pred)
for (y_true_fmt, fmt_name), (y_pred_fmt, _) in zip(y_true_gen,
y_pred_gen):
assert score_1 == metric(y_true_fmt, y_pred_fmt)
else:
metric = UNSUPERVISED_METRICS[metric_name]
X = np.random.randint(10, size=(8, 10))
score_1 = metric(X, y_true)
assert score_1 == metric(X.astype(float), y_true)
y_true_gen = generate_formats(y_true)
for (y_true_fmt, fmt_name) in y_true_gen:
assert score_1 == metric(X, y_true_fmt)
@pytest.mark.parametrize("metric", SUPERVISED_METRICS.values())
def test_single_sample(metric):
    # only the supervised metrics support a single sample
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
metric([i], [j])
@pytest.mark.parametrize(
"metric_name, metric_func",
dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS).items()
)
def test_inf_nan_input(metric_name, metric_func):
if metric_name in SUPERVISED_METRICS:
invalids = [([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf])]
else:
X = np.random.randint(10, size=(2, 10))
invalids = [(X, [np.inf, np.inf]),
(X, [np.nan, np.nan]),
(X, [np.nan, np.inf])]
    for args in invalids:
        with pytest.raises(ValueError, match='contains NaN, infinity'):
            metric_func(*args)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# acertmgr - acme api v2 functions (implements RFC8555)
# Copyright (c) Rudolf Mayerhofer, 2019.
# available under the ISC license, see LICENSE
import copy
import json
import re
import time
from acertmgr import tools
from acertmgr.authority.acme import ACMEAuthority as AbstractACMEAuthority
from acertmgr.tools import log
# Maximum age for nonce values (Boulder invalidates them after some time, so we use a low value of 2 minutes here)
MAX_NONCE_AGE = 120
class ACMEAuthority(AbstractACMEAuthority):
# @brief Init class with config
# @param config Configuration data
# @param key Account key data
def __init__(self, config, key):
AbstractACMEAuthority.__init__(self, config, key)
# Initialize config vars
self.ca = config['authority']
self.tos_agreed = str(config.get('authority_tos_agreement')).lower() == 'true'
contact_email = config.get('authority_contact_email')
if contact_email is None:
self.contact = None
elif isinstance(contact_email, list):
self.contact = ["mailto:{}".format(contact) for contact in contact_email]
else:
self.contact = ["mailto:{}".format(contact_email)]
# Initialize runtime vars
code, self.directory, _ = self._request_url(self.ca + '/directory')
if code >= 400 or not self.directory:
self.directory = {
"meta": {},
"newAccount": "{}/acme/new-acct".format(self.ca),
"newNonce": "{}/acme/new-nonce".format(self.ca),
"newOrder": "{}/acme/new-order".format(self.ca),
"revokeCert": "{}/acme/revoke-cert".format(self.ca),
}
log("API directory retrieval failed ({}). Guessed necessary values: {}".format(code, self.directory),
warning=True)
self.nonce = None
self.nonce_time = 0
self.algorithm, jwk = tools.get_key_alg_and_jwk(key)
self.account_protected = {
"alg": self.algorithm,
"jwk": jwk
}
self.account_id = None # will be updated to correct value during account registration
# @brief fetch a given url
def _request_url(self, url, data=None, raw_result=False):
header = {'Content-Type': 'application/jose+json'}
if data:
# Always encode data to bytes
data = data.encode('utf-8')
try:
resp = tools.get_url(url, data, header)
except IOError as e:
body = getattr(e, "read", e.__str__)()
if getattr(body, 'decode', None):
# Decode function available? Use it to get a proper str
body = body.decode('utf-8')
return getattr(e, "code", 999), body, {}
# Store next Replay-Nonce if it is in the header
if 'Replay-Nonce' in resp.headers:
self.nonce = resp.headers['Replay-Nonce']
self.nonce_time = time.time()
body = resp.read()
if getattr(body, 'decode', None):
# Decode function available? Use it to get a proper str
body = body.decode('utf-8')
if not raw_result and len(body) > 0:
try:
body = json.loads(body)
except json.JSONDecodeError as e:
raise ValueError('Could not parse non-raw result (expected JSON)', e)
return resp.getcode(), body, resp.headers
    # @brief fetch a url with a signed request
def _request_acme_url(self, url, payload=None, protected=None, raw_result=False):
if not protected:
protected = {}
if payload:
payload64 = tools.bytes_to_base64url(json.dumps(payload).encode('utf8'))
else:
payload64 = "" # for POST-as-GET
        # Request a new nonce if there is none in the cache or the cached one is too old
if not self.nonce or time.time() > self.nonce_time + MAX_NONCE_AGE:
self._request_url(self.directory['newNonce'])
# Set request nonce to current cache value
protected["nonce"] = self.nonce
        # Reset nonce cache as we are using its current value
self.nonce = None
protected["url"] = url
if self.algorithm:
protected["alg"] = self.algorithm
if self.account_id:
protected["kid"] = self.account_id
protected64 = tools.bytes_to_base64url(json.dumps(protected).encode('utf8'))
out = tools.signature_of_str(self.key, '.'.join([protected64, payload64]))
data = json.dumps({
"protected": protected64,
"payload": payload64,
"signature": tools.bytes_to_base64url(out),
})
return self._request_url(url, data, raw_result)
# @brief send a signed request to authority
def _request_acme_endpoint(self, request, payload=None, protected=None, raw_result=False):
return self._request_acme_url(self.directory[request], payload, protected, raw_result)
# @brief register an account over ACME
def register_account(self):
if self.account_id:
# We already have registered with this authority, just return
return
protected = copy.deepcopy(self.account_protected)
payload = {
"termsOfServiceAgreed": self.tos_agreed,
"onlyReturnExisting": False,
}
if self.contact:
payload["contact"] = self.contact
code, result, headers = self._request_acme_endpoint("newAccount", payload, protected)
if code < 400 and result['status'] == 'valid':
self.account_id = headers['Location']
if 'meta' in self.directory and 'termsOfService' in self.directory['meta']:
log("ToS at {} have been accepted.".format(self.directory['meta']['termsOfService']))
log("Account registered and valid on {}.".format(self.ca))
else:
raise ValueError("Error registering account: {0} {1}".format(code, result))
# @brief function to fetch certificate using ACME
# @param csr the certificate signing request in pyopenssl format
# @param domains list of domains in the certificate, first is CN
# @param challenge_handlers a dict containing challenge for all given domains
# @return the certificate and corresponding ca as a tuple
# @note algorithm and parts of the code are from acme-tiny
def get_crt_from_csr(self, csr, domains, challenge_handlers):
account_thumbprint = tools.bytes_to_base64url(
tools.hash_of_str(json.dumps(self.account_protected['jwk'], sort_keys=True, separators=(',', ':'))))
log("Ordering certificate for {}".format(domains))
identifiers = [{'type': 'dns', 'value': domain} for domain in domains]
code, order, headers = self._request_acme_endpoint('newOrder', {'identifiers': identifiers})
if code >= 400:
raise ValueError("Error with certificate order: {0} {1}".format(code, order))
order_url = headers['Location']
authorizations = list()
# verify each domain
try:
for authorizationUrl in order['authorizations']:
# get new challenge
code, authorization, _ = self._request_acme_url(authorizationUrl)
if code >= 400:
raise ValueError("Error requesting authorization: {0} {1}".format(code, authorization))
authorization['_domain'] = "*.{}".format(authorization['identifier']['value']) if \
'wildcard' in authorization and authorization['wildcard'] else authorization['identifier']['value']
if authorization.get('status', 'no-status-found') == 'valid':
log("{} has already been authorized".format(authorization['_domain']))
continue
if authorization['_domain'] not in challenge_handlers:
raise ValueError("No challenge handler given for domain: {0}".format(authorization['_domain']))
log("Authorizing {0}".format(authorization['_domain']))
# create the challenge
ctype = challenge_handlers[authorization['_domain']].get_challenge_type()
matching_challenges = [c for c in authorization['challenges'] if c['type'] == ctype]
if len(matching_challenges) == 0:
raise ValueError("Error no challenge matching {0} found: {1}".format(ctype, authorization))
authorization['_challenge'] = matching_challenges[0]
if authorization['_challenge'].get('status', 'no-status-found') == 'valid':
log("{} has already been authorized using {}".format(authorization['_domain'], ctype))
continue
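                # Sanitize the challenge token before handing it to the challenge
                # handlers (it is typically used as part of a file name or URL path)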
authorization['_token'] = re.sub(r"[^A-Za-z0-9_\-]", "_", authorization['_challenge']['token'])
challenge_handlers[authorization['_domain']].create_challenge(authorization['identifier']['value'],
account_thumbprint,
authorization['_token'])
authorizations.append(authorization)
# after all challenges are created, start processing authorizations
for authorization in authorizations:
try:
log("Starting verification of {}".format(authorization['_domain']))
challenge_handlers[authorization['_domain']].start_challenge(authorization['identifier']['value'],
account_thumbprint,
authorization['_token'])
# notify challenge is met
code, challenge_status, _ = self._request_acme_url(authorization['_challenge']['url'], {
"keyAuthorization": "{0}.{1}".format(authorization['_token'], account_thumbprint),
})
# wait for challenge to be verified
while code < 400 and challenge_status.get('status') == "pending":
time.sleep(5)
code, challenge_status, _ = self._request_acme_url(authorization['_challenge']['url'])
if code < 400 and challenge_status.get('status') == "valid":
log("{0} verified".format(authorization['_domain']))
else:
raise ValueError("{0} challenge did not pass ({1}): {2}".format(
authorization['_domain'], code, challenge_status))
finally:
challenge_handlers[authorization['_domain']].stop_challenge(authorization['identifier']['value'],
account_thumbprint,
authorization['_token'])
finally:
            # Destroy challenge handlers in reverse order so that any state
            # they saved is restored correctly
for authorization in reversed(authorizations):
try:
challenge_handlers[authorization['_domain']].destroy_challenge(
authorization['identifier']['value'], account_thumbprint, authorization['_token'])
except Exception as e:
log('Challenge destruction failed: {}'.format(e), error=True)
# check order status and retry once
code, order, _ = self._request_acme_url(order_url)
if code < 400 and order.get('status') == 'pending':
time.sleep(5)
code, order, _ = self._request_acme_url(order_url)
if code >= 400:
raise ValueError("Order is still not ready to be finalized: {0} {1}".format(code, order))
# get the new certificate
log("Finalizing certificate")
code, finalize, _ = self._request_acme_url(order['finalize'], {
"csr": tools.bytes_to_base64url(tools.convert_cert_to_der_bytes(csr)),
})
while code < 400 and (finalize.get('status') == 'pending' or finalize.get('status') == 'processing'):
time.sleep(5)
code, finalize, _ = self._request_acme_url(order_url)
if code >= 400:
raise ValueError("Error finalizing certificate: {0} {1}".format(code, finalize))
log("Certificate ready!")
# return certificate
code, certificate, _ = self._request_acme_url(finalize['certificate'], raw_result=True)
if code >= 400:
raise ValueError("Error downloading certificate chain: {0} {1}".format(code, certificate))
cert_dict = re.match((r'(?P<cert>^-----BEGIN CERTIFICATE-----\n[^\-]+\n-----END CERTIFICATE-----)\n*'
r'(?P<ca>-----BEGIN CERTIFICATE-----\n.+\n-----END CERTIFICATE-----)?$'),
certificate, re.DOTALL).groupdict()
cert = tools.convert_pem_str_to_cert(cert_dict['cert'])
if cert_dict['ca'] is None:
ca = tools.download_issuer_ca(cert)
else:
ca = tools.convert_pem_str_to_cert(cert_dict['ca'])
return cert, ca
# @brief function to revoke a certificate using ACME
# @param crt certificate to revoke
# @param reason (int) optional certificate revoke reason (see https://tools.ietf.org/html/rfc5280#section-5.3.1)
def revoke_crt(self, crt, reason=None):
payload = {'certificate': tools.bytes_to_base64url(tools.convert_cert_to_der_bytes(crt))}
if reason:
payload['reason'] = int(reason)
code, result, _ = self._request_acme_endpoint("revokeCert", payload)
if code < 400:
log("Revocation successful")
else:
raise ValueError("Revocation failed: {}".format(result))
|
|
# -*- test-case-name: twisted.test.test_pcp -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Producer-Consumer Proxy.
"""
from zope.interface import implements
from twisted.internet import interfaces
class BasicProducerConsumerProxy:
"""
I can act as a man in the middle between any Producer and Consumer.
@ivar producer: the Producer I subscribe to.
@type producer: L{IProducer<interfaces.IProducer>}
@ivar consumer: the Consumer I publish to.
@type consumer: L{IConsumer<interfaces.IConsumer>}
@ivar paused: As a Producer, am I paused?
@type paused: bool
"""
implements(interfaces.IProducer, interfaces.IConsumer)
consumer = None
producer = None
producerIsStreaming = None
iAmStreaming = True
outstandingPull = False
paused = False
stopped = False
def __init__(self, consumer):
self._buffer = []
if consumer is not None:
self.consumer = consumer
consumer.registerProducer(self, self.iAmStreaming)
# Producer methods:
def pauseProducing(self):
self.paused = True
if self.producer:
self.producer.pauseProducing()
def resumeProducing(self):
self.paused = False
if self._buffer:
# TODO: Check to see if consumer supports writeSeq.
self.consumer.write(''.join(self._buffer))
self._buffer[:] = []
else:
if not self.iAmStreaming:
self.outstandingPull = True
if self.producer is not None:
self.producer.resumeProducing()
def stopProducing(self):
if self.producer is not None:
self.producer.stopProducing()
if self.consumer is not None:
del self.consumer
# Consumer methods:
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
self.consumer.write(data)
self.outstandingPull = False
def finish(self):
if self.consumer is not None:
self.consumer.finish()
self.unregisterProducer()
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerIsStreaming = streaming
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
if self.consumer:
self.consumer.unregisterProducer()
def __repr__(self):
return '<%s@%x around %s>' % (self.__class__, id(self), self.consumer)
class ProducerConsumerProxy(BasicProducerConsumerProxy):
"""ProducerConsumerProxy with a finite buffer.
When my buffer fills up, I have my parent Producer pause until my buffer
has room in it again.
"""
# Copies much from abstract.FileDescriptor
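    # Note: 2**2**2**2 == 2**(2**(2**2)) == 2**16 == 65536 (** is right-associative).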
bufferSize = 2**2**2**2
producerPaused = False
unregistered = False
def pauseProducing(self):
        # Does *not* call up to BasicProducerConsumerProxy to relay the pause
        # message through to my parent Producer.
self.paused = True
def resumeProducing(self):
self.paused = False
if self._buffer:
data = ''.join(self._buffer)
bytesSent = self._writeSomeData(data)
if bytesSent < len(data):
unsent = data[bytesSent:]
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer[:] = [unsent]
else:
self._buffer[:] = []
else:
bytesSent = 0
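        # If our producer unregistered while data was still buffered, finish
        # unregistering from the consumer once that buffer has been flushed.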
if (self.unregistered and bytesSent and not self._buffer and
self.consumer is not None):
self.consumer.unregisterProducer()
if not self.iAmStreaming:
self.outstandingPull = not bytesSent
if self.producer is not None:
bytesBuffered = sum([len(s) for s in self._buffer])
# TODO: You can see here the potential for high and low
# watermarks, where bufferSize would be the high mark when we
# ask the upstream producer to pause, and we wouldn't have
# it resume again until it hit the low mark. Or if producer
# is Pull, maybe we'd like to pull from it as much as necessary
# to keep our buffer full to the low mark, so we're never caught
# without something to send.
if self.producerPaused and (bytesBuffered < self.bufferSize):
                # Now that there is room in our buffer again, resume our producer.
self.producerPaused = False
self.producer.resumeProducing()
elif self.outstandingPull:
# I did not have any data to write in response to a pull,
# so I'd better pull some myself.
self.producer.resumeProducing()
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
assert not self._buffer, (
"Writing fresh data to consumer before my buffer is empty!")
# I'm going to use _writeSomeData here so that there is only one
# path to self.consumer.write. But it doesn't actually make sense,
# if I am streaming, for some data to not be all data. But maybe I
# am not streaming, but I am writing here anyway, because there was
# an earlier request for data which was not answered.
bytesSent = self._writeSomeData(data)
self.outstandingPull = False
if not bytesSent == len(data):
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer.append(data[bytesSent:])
if (self.producer is not None) and self.producerIsStreaming:
bytesBuffered = sum([len(s) for s in self._buffer])
if bytesBuffered >= self.bufferSize:
self.producer.pauseProducing()
self.producerPaused = True
def registerProducer(self, producer, streaming):
self.unregistered = False
BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
self.unregistered = True
if self.consumer and not self._buffer:
self.consumer.unregisterProducer()
def _writeSomeData(self, data):
"""Write as much of this data as possible.
@returns: The number of bytes written.
"""
if self.consumer is None:
return 0
self.consumer.write(data)
return len(data)
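# Minimal usage sketch (the stub consumer below is hypothetical and only for
# illustration; it is not part of Twisted): it demonstrates that writes made
# while the proxy is paused are buffered and flushed on resumeProducing().
class _ListConsumer:
    """Trivial consumer that records everything written to it."""
    def __init__(self):
        self.received = []
        self.producer = None
    def registerProducer(self, producer, streaming):
        self.producer = producer
    def unregisterProducer(self):
        self.producer = None
    def write(self, data):
        self.received.append(data)

if __name__ == '__main__':
    sink = _ListConsumer()
    proxy = ProducerConsumerProxy(sink)
    proxy.pauseProducing()
    proxy.write('spam')            # buffered; nothing reaches the consumer yet
    assert sink.received == []
    proxy.resumeProducing()        # buffer is flushed via _writeSomeData()
    assert sink.received == ['spam']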
|
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import numpy as np
import tensorflow as tf
import preprocessing
# Allow import of top level python files
import inspect
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from benchmark_args import BaseCommandLineAPI
from benchmark_runner import BaseBenchmarkRunner
class CommandLineAPI(BaseCommandLineAPI):
def __init__(self):
super(CommandLineAPI, self).__init__()
self._parser.add_argument(
'--input_size',
type=int,
default=224,
help='Size of input images expected by the '
'model'
)
self._parser.add_argument(
'--num_classes',
type=int,
default=1001,
help='Number of classes used when training '
'the model'
)
self._parser.add_argument(
'--preprocess_method',
type=str,
choices=['vgg', 'inception', 'resnet50_v1_5_tf1_ngc_preprocess'],
default='vgg',
help='The image preprocessing method used in '
'dataloading.'
)
def _post_process_args(self, args):
args = super(CommandLineAPI, self)._post_process_args(args)
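        # 1001-class checkpoints reserve index 0 for a background class; shift
        # predictions by 1 so they line up with the 0-based validation labels.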
args.labels_shift = 1 if args.num_classes == 1001 else 0
return args
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
# %%%%%%%%%%%%%%%%% IMPLEMENT MODEL-SPECIFIC FUNCTIONS HERE %%%%%%%%%%%%%%%%%% #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class BenchmarkRunner(BaseBenchmarkRunner):
def get_dataset_batches(self):
"""Returns a list of batches of input samples.
Each batch should be in the form [x, y], where
x is a numpy array of the input samples for the batch, and
y is a numpy array of the expected model outputs for the batch
Returns:
- dataset: a TF Dataset object
- bypass_data_to_eval: any object type that will be passed unmodified to
`evaluate_result()`. If not necessary: `None`
Note: script arguments can be accessed using `self._args.attr`
"""
def get_files(data_dir, filename_pattern):
if data_dir is None:
return []
files = tf.io.gfile.glob(os.path.join(data_dir, filename_pattern))
if not files:
raise ValueError(
'Can not find any files in {} with '
'pattern "{}"'.format(data_dir, filename_pattern)
)
return files
def deserialize_image_record(record):
feature_map = {
'image/encoded': tf.io.FixedLenFeature([], tf.string, ''),
'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1)
}
with tf.compat.v1.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(
serialized=record, features=feature_map
)
imgdata = obj['image/encoded']
label = tf.cast(obj['image/class/label'], tf.int32)
return imgdata, label
def get_preprocess_fn(preprocess_method, input_size):
"""Creates a function to parse and process a TFRecord
input_size: int
returns: function, the preprocessing function for a record
"""
if preprocess_method == 'vgg':
preprocess_fn = preprocessing.vgg_preprocess
elif preprocess_method == 'inception':
preprocess_fn = preprocessing.inception_preprocess
elif preprocess_method == 'resnet50_v1_5_tf1_ngc_preprocess':
preprocess_fn = preprocessing.resnet50_v1_5_tf1_ngc_preprocess
else:
raise ValueError(
'Invalid preprocessing method {}'.format(preprocess_method)
)
def preprocess_sample_fn(record):
# Parse TFRecord
imgdata, label = deserialize_image_record(record)
label -= 1 # Change to 0-based (don't use background class)
try:
image = tf.image.decode_jpeg(
imgdata,
channels=3,
fancy_upscaling=False,
dct_method='INTEGER_FAST'
)
except:
image = tf.image.decode_png(imgdata, channels=3)
# Use model's preprocessing function
image = preprocess_fn(image, input_size, input_size)
return image, label
return preprocess_sample_fn
data_files = get_files(self._args.data_dir, 'validation*')
dataset = tf.data.Dataset.from_tensor_slices(data_files)
dataset = dataset.interleave(
tf.data.TFRecordDataset,
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=max(self._args.batch_size, 32)
)
# preprocess function for input data
preprocess_fn = get_preprocess_fn(
preprocess_method=self._args.preprocess_method,
input_size=self._args.input_size
)
dataset = dataset.map(
map_func=preprocess_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
dataset = dataset.batch(self._args.batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset, None
def preprocess_model_inputs(self, data_batch):
"""This function prepare the `data_batch` generated from the dataset.
Returns:
x: input of the model
y: data to be used for model evaluation
Note: script arguments can be accessed using `self._args.attr`
"""
x, y = data_batch
return x, y
def postprocess_model_outputs(self, predictions, expected):
"""Post process if needed the predictions and expected tensors. At the
        minimum, this function transforms all TF Tensors into numpy arrays.
Most models will not need to modify this function.
Note: script arguments can be accessed using `self._args.attr`
"""
predictions = predictions.numpy()
if len(predictions.shape) != 1:
predictions = tf.math.argmax(predictions, axis=1)
predictions = predictions.numpy().reshape(-1)
        return predictions - self._args.labels_shift, expected.numpy()
def evaluate_model(self, predictions, expected, bypass_data_to_eval):
"""Evaluate result predictions for entire dataset.
This computes overall accuracy, mAP, etc. Returns the
metric value and a metric_units string naming the metric.
        Note: script arguments can be accessed using `self._args.attr`
"""
return (
np.mean(predictions["data"] == expected["data"]) * 100.0,
"Top-1 Accuracy %"
)
if __name__ == '__main__':
cmdline_api = CommandLineAPI()
args = cmdline_api.parse_args()
runner = BenchmarkRunner(args)
runner.execute_benchmark()
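# A minimal invocation sketch (hypothetical script name and paths; the exact flag
# set is defined by CommandLineAPI above and its base class):
#
#   python image_classification.py \
#       --data_dir=/data/imagenet/tfrecords \
#       --preprocess_method=vgg \
#       --num_classes=1001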
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task sub-commands"""
import importlib
import json
import logging
import os
import textwrap
from contextlib import contextmanager, redirect_stderr, redirect_stdout
from typing import List
from pendulum.parsing.exceptions import ParserError
from airflow import settings
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagPickle, TaskInstance
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS
from airflow.utils import cli as cli_utils
from airflow.utils.cli import (
get_dag,
get_dag_by_file_location,
get_dag_by_pickle,
get_dags,
suppress_logs_and_warning,
)
from airflow.utils.dates import timezone
from airflow.utils.log.logging_mixin import StreamLogWriter
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session, provide_session
def _get_ti(task, exec_date_or_run_id):
"""Get the task instance through DagRun.run_id, if that fails, get the TI the old way"""
dag_run = task.dag.get_dagrun(run_id=exec_date_or_run_id)
if not dag_run:
try:
execution_date = timezone.parse(exec_date_or_run_id)
ti = TaskInstance(task, execution_date)
ti.refresh_from_db()
return ti
except (ParserError, TypeError):
raise AirflowException(f"DagRun with run_id: {exec_date_or_run_id} not found")
ti = dag_run.get_task_instance(task.task_id)
ti.task = task
return ti
def _run_task_by_selected_method(args, dag: DAG, ti: TaskInstance) -> None:
"""
Runs the task in one of 3 modes
- using LocalTaskJob
- as raw task
- by executor
"""
if args.local and args.raw:
raise AirflowException(
"Option --raw and --local are mutually exclusive. "
"Please remove one option to execute the command."
)
if args.local:
_run_task_by_local_task_job(args, ti)
elif args.raw:
_run_raw_task(args, ti)
else:
_run_task_by_executor(args, dag, ti)
def _run_task_by_executor(args, dag, ti):
"""
    Sends the task to the executor for execution. This can result in the task being started by another host
    if the executor implementation supports it.
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
pickle_id = pickle.id
# TODO: This should be written to a log
print(f'Pickled dag {dag} as pickle_id: {pickle_id}')
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.job_id = "manual"
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end()
def _run_task_by_local_task_job(args, ti):
"""Run LocalTaskJob, which monitors the raw task execution process"""
run_job = LocalTaskJob(
task_instance=ti,
mark_success=args.mark_success,
pickle_id=args.pickle,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
try:
run_job.run()
finally:
if args.shut_down_logging:
logging.shutdown()
RAW_TASK_UNSUPPORTED_OPTION = [
"ignore_all_dependencies",
"ignore_depends_on_past",
"ignore_dependencies",
"force",
]
def _run_raw_task(args, ti: TaskInstance) -> None:
"""Runs the main task handling code"""
unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]
if unsupported_options:
raise AirflowException(
"Option --raw does not work with some of the other options on this command. You "
"can't use --raw option and the following options: {}. You provided the option {}. "
"Delete it to execute the command".format(
", ".join(f"--{o}" for o in RAW_TASK_UNSUPPORTED_OPTION),
", ".join(f"--{o}" for o in unsupported_options),
)
)
ti._run_raw_task(
mark_success=args.mark_success,
job_id=args.job_id,
pool=args.pool,
error_file=args.error_file,
)
@contextmanager
def _capture_task_logs(ti):
"""Manage logging context for a task run
- Replace the root logger configuration with the airflow.task configuration
so we can capture logs from any custom loggers used in the task.
- Redirect stdout and stderr to the task instance log, as INFO and WARNING
level messages, respectively.
"""
modify = not settings.DONOT_MODIFY_HANDLERS
if modify:
root_logger, task_logger = logging.getLogger(), logging.getLogger('airflow.task')
orig_level = root_logger.level
root_logger.setLevel(task_logger.level)
orig_handlers = root_logger.handlers.copy()
root_logger.handlers[:] = task_logger.handlers
try:
info_writer = StreamLogWriter(ti.log, logging.INFO)
warning_writer = StreamLogWriter(ti.log, logging.WARNING)
with redirect_stdout(info_writer), redirect_stderr(warning_writer):
yield
finally:
if modify:
# Restore the root logger to its original state.
root_logger.setLevel(orig_level)
root_logger.handlers[:] = orig_handlers
@cli_utils.action_logging
def task_run(args, dag=None):
"""Runs a single task instance"""
# Load custom airflow config
if args.cfg_path:
with open(args.cfg_path) as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
conf.read_dict(conf_dict, source=args.cfg_path)
settings.configure_vars()
settings.MASK_SECRETS_IN_LOGS = True
# IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
settings.configure_orm(disable_connection_pool=True)
if dag and args.pickle:
raise AirflowException("You cannot use the --pickle option when using DAG.cli() method.")
elif args.pickle:
print(f'Loading pickle id: {args.pickle}')
dag = get_dag_by_pickle(args.pickle)
elif not dag:
dag = get_dag(args.subdir, args.dag_id)
else:
# Use DAG from parameter
pass
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
print(f"Running {ti} on host {hostname}")
if args.interactive:
_run_task_by_selected_method(args, dag, ti)
else:
with _capture_task_logs(ti):
_run_task_by_selected_method(args, dag, ti)
@cli_utils.action_logging
def task_failed_deps(args):
"""
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_logging
@suppress_logs_and_warning
def task_state(args):
"""
Returns the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
print(ti.current_state())
@cli_utils.action_logging
@suppress_logs_and_warning
def task_list(args, dag=None):
"""Lists the tasks within a DAG at the command line"""
dag = dag or get_dag(args.subdir, args.dag_id)
if args.tree:
dag.tree_view()
else:
tasks = sorted(t.task_id for t in dag.tasks)
print("\n".join(tasks))
SUPPORTED_DEBUGGER_MODULES: List[str] = [
"pudb",
"web_pdb",
"ipdb",
"pdb",
]
def _guess_debugger():
"""
    Tries to guess the debugger used by the user. If no user-installed debugger is found,
    returns ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__
"""
for mod in SUPPORTED_DEBUGGER_MODULES:
try:
return importlib.import_module(mod)
except ImportError:
continue
return importlib.import_module("pdb")
@cli_utils.action_logging
@suppress_logs_and_warning
@provide_session
def task_states_for_dag_run(args, session=None):
"""Get the status of all task instances in a DagRun"""
dag_run = (
session.query(DagRun)
.filter(DagRun.run_id == args.execution_date_or_run_id, DagRun.dag_id == args.dag_id)
.one_or_none()
)
if not dag_run:
try:
execution_date = timezone.parse(args.execution_date_or_run_id)
dag_run = (
session.query(DagRun)
.filter(DagRun.execution_date == execution_date, DagRun.dag_id == args.dag_id)
.one_or_none()
)
except (ParserError, TypeError) as err:
raise AirflowException(f"Error parsing the supplied execution_date. Error: {str(err)}")
if dag_run is None:
raise AirflowException("DagRun does not exist.")
tis = dag_run.get_task_instances()
AirflowConsole().print_as(
data=tis,
output=args.output,
mapper=lambda ti: {
"dag_id": ti.dag_id,
"execution_date": ti.execution_date.isoformat(),
"task_id": ti.task_id,
"state": ti.state,
"start_date": ti.start_date.isoformat() if ti.start_date else "",
"end_date": ti.end_date.isoformat() if ti.end_date else "",
},
)
@cli_utils.action_logging
def task_test(args, dag=None):
"""Tests task for a given dag_id"""
# We want to log output from operators etc to show up here. Normally
# airflow.task would redirect to a file, but here we want it to propagate
# up to the normal airflow handler.
settings.MASK_SECRETS_IN_LOGS = True
handlers = logging.getLogger('airflow.task').handlers
already_has_stream_handler = False
for handler in handlers:
already_has_stream_handler = isinstance(handler, logging.StreamHandler)
if already_has_stream_handler:
break
if not already_has_stream_handler:
logging.getLogger('airflow.task').propagate = True
env_vars = {'AIRFLOW_TEST_MODE': 'True'}
if args.env_vars:
env_vars.update(args.env_vars)
os.environ.update(env_vars)
dag = dag or get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
# Add CLI provided task_params to task.params
if args.task_params:
passed_in_params = json.loads(args.task_params)
task.params.update(passed_in_params)
ti = _get_ti(task, args.execution_date_or_run_id)
try:
if args.dry_run:
ti.dry_run()
else:
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
except Exception:
if args.post_mortem:
debugger = _guess_debugger()
debugger.post_mortem()
else:
raise
finally:
if not already_has_stream_handler:
# Make sure to reset back to normal. When run for CLI this doesn't
# matter, but it does for test suite
logging.getLogger('airflow.task').propagate = False
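# Usage sketch: `airflow tasks test <dag_id> <task_id> <execution_date_or_run_id>`
# runs a single task instance locally, ignoring task dependencies and prior state
# (see the ti.run(..., test_mode=True) call above).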
@cli_utils.action_logging
@suppress_logs_and_warning
def task_render(args):
"""Renders and displays templated fields for a given task"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
ti.render_templates()
for attr in task.__class__.template_fields:
print(
textwrap.dedent(
f""" # ----------------------------------------------------------
# property: {attr}
# ----------------------------------------------------------
{getattr(task, attr)}
"""
)
)
@cli_utils.action_logging
def task_clear(args):
"""Clears all task instances or only those matched by regex for a DAG(s)"""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:
dags = [get_dag_by_file_location(args.dag_id)]
else:
# todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?
dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.partial_subset(
task_ids_or_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.yes,
include_subdags=not args.exclude_subdags,
include_parentdag=not args.exclude_parentdag,
)
|
|
import os  # needed for the file metadata lookups in ImageInfo.scanMetadata below
import re
from PySide import QtGui
from datetime import datetime
def lataaTied(tiednimi):
f = open(tiednimi, 'r')
html_doc=f.read()
f.close()
return html_doc
def tallennaTied(tiednimi,sisalto):
print "tallennus"
f = open(tiednimi, 'a')
f.write(sisalto.encode("utf-8"))
f.close()
class ImageInfo(object):
def __init__(self,filename,infosource = 'harddrive'):
self.filename = filename #os.path.dirname(fullpath)
self.fullpath = None
self.folderpath = None # os.path.dirname(fullpath)
self.size = None
self.lastmodified= None
self.pictaken= None
self.tags= None
self.title= None
self.pubtags= None
self.pixsize= None
self.volserial = None
self.taggedtime = None
self.unsharp = False
self.pendingdelete = False
self.infosource = infosource
if self.infosource == 'harddrive':
self.scanMetadata()
def scanMetadata(self):
try:
self.size=os.path.getsize(self.fullpath )
except:
pass
try:
self.lastmodified=os.path.getmtime(self.fullpath )
except:
pass
try:
            self.pictaken = get_date_taken(self.fullpath)  # get_date_taken: external EXIF helper assumed to be defined elsewhere
except:
pass
self_ohjelmapath=r"E:\python\imagetagger"
self_kuvatHash = {}
self_luetteloTiedosto = "tagiluettelo.txt"
self_luontikaneetti = "uusi tagi lista luotu: "
self_sarakkeidenselitys = "tiedostonimi | tagit | kuvatiedoston last modified kuvapixelikoko kuvatiedosto koko | sijaintikansio | tagirivin luontihetki"
self_state = None
self_debugLog = ""
kuvalistaus = lataaTied(self_ohjelmapath+"/"+self_luetteloTiedosto)
kuvalistaus[:200]
kuvalistausArr = kuvalistaus.split("\n")
print "kuvalistausArr[0] ",kuvalistausArr[0]
if self_luontikaneetti[0:9] in kuvalistausArr[0]:
kuvalistausArr.pop(0)
print "kuvalistausArr[0] ",kuvalistausArr[0]
selitysrivinalku = "tiedostonimi | tagit | kuvatiedoston last modified kuvapixelikoko"
if selitysrivinalku in kuvalistausArr[0]:
kuvalistausArr.pop(0)
print "kuvalistausArr[0] ",kuvalistausArr[0]
print "len(kuvalistausArr) ",len(kuvalistausArr)
i = 2000
kuvainfoObjs=[]
self_kuvatHash={}
dupsii=[]
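# Each non-header line of the tag listing file is "|"-separated. An illustrative
# example (reconstructed from the parsing below, not taken from real data):
#   IMG_1234.JPG | holiday beach | 2015-07-01 12:00:00 2015-07-02 08:10:00 4000X3000 2048kb | E:\pics\2015 | tagged: 2015-07-03 10:00:00 | volume serial: ABCD-1234 | title: beach day | pubtags: beach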
for i in range(len(kuvalistausArr)):
tagriviarray=kuvalistausArr[i].split("|")
#puts tagriviarray.size
tagriviarray
infohash={}
if len(tagriviarray) >= 3 :#and len( tagriviarray) <= 5:
infohash['filename']=tagriviarray[0].strip()
infohash['tags']=tagriviarray[1].strip()
if "UNSHARP" in infohash['tags']:
infohash['unsharp']= True
if "DELETE" in infohash['tags']:
infohash['pendingdelete']= True
etadataar= tagriviarray[2].strip().split()
etadataar
infohash['metadatalistlen']= len(etadataar)
infohash['metadata']= etadataar
if infohash['metadatalistlen'] > 4:
infohash['pictaken']= " ".join( etadataar[:2])
infohash['lastmodified']= " ".join( etadataar[2:4])
infohash['pixsize']= etadataar[4]
infohash['size']= etadataar[5]
elif infohash['metadatalistlen'] == 4:
infohash['lastmodified']= " ".join( etadataar[:2])
infohash['pixsize']= etadataar[2]
infohash['size']= etadataar[3]
else:
infohash['lastmodified']= etadataar[0]
infohash['pixsize']= etadataar[1]
infohash['size']= etadataar[2]
if len(tagriviarray) > 3:
infohash['folderpath']=tagriviarray[3].strip()
infohash['fullpath'] = infohash['folderpath'] + "\\" +infohash['filename']
if len(tagriviarray) > 4:
## tagriviarray[4]
tagriviarray
infohash['taggedtime']=re.findall(r'tagg?e?d:\s+(.*?)$',tagriviarray[4])[0].strip()
if len(tagriviarray) > 5:
infohash['volserial']=re.findall(r':\s+([A-Z0-9]+\-[A-Z0-9]+)\s*$',tagriviarray[5])[0].strip()
if len(tagriviarray) > 6:
infohash['title']=re.findall(r'title:\s+(.*?)$',tagriviarray[6])[0].strip()
if len(tagriviarray) > 7:
infohash['pubtags']=re.findall(r'pubtags:\s+(.*?)$',tagriviarray[7])[0].strip()
infohash
kuvakey=unicode( infohash['filename'].decode("utf-8"))
kuvainfo = ImageInfo(kuvakey, 'tagilista')
kuvainfo.filename = infohash['filename']
try:
kuvainfo.fullpath = infohash['fullpath']
kuvainfo.folderpath = infohash['folderpath']
except:
pass
try:
kuvainfo.size = infohash['size']
except:
pass
try:
kuvainfo.lastmodified = infohash['lastmodified']
except:
pass
try:
kuvainfo.pictaken = infohash['pictaken']
except:
pass
try:
kuvainfo.tags = infohash['tags']
except:
pass
try:
kuvainfo.title = infohash['title']
except:
pass
try:
kuvainfo.pubtags = infohash['pubtags']
except:
pass
try:
kuvainfo.pixsize = infohash['pixsize']
except:
pass
try:
kuvainfo.volserial = infohash['volserial']
except:
pass
try:
kuvainfo.taggedtime = infohash['taggedtime']
except:
pass
try:
kuvainfo.unsharp = infohash['unsharp']
except:
pass
try:
kuvainfo.pendingdelete = infohash['pendingdelete']
except:
pass
kuvainfoObjs.append(kuvainfo)
if kuvakey not in self_kuvatHash.keys():
self_kuvatHash[kuvakey]=infohash
self_debugLog += tagriviarray[0].strip() +": " + tagriviarray[1].strip() + "\n"
elif 'folderpath' in self_kuvatHash[kuvakey].keys() and 'folderpath' in infohash.keys() and self_kuvatHash[kuvakey]['folderpath'] == infohash['folderpath']:
self_kuvatHash[kuvakey]=infohash
elif 'metadata' in self_kuvatHash[kuvakey].keys() and 'metadata' in infohash.keys() and self_kuvatHash[kuvakey]['metadata'] == infohash['metadata']:
self_kuvatHash[kuvakey]=infohash
else:
dupsii.append((self_kuvatHash[kuvakey],infohash))
len(dupsii)
dupsii
dupsii[0][0]
dupsii[0]
dupsii[1]
dupsii[2]
dupsii[0][0]['filename']
dupsii[0][1]['filename']
dupsii[0][0]['metadata']
dupsii[0][1]['metadata']
len( self_kuvatHash.keys())
len(kuvainfoObjs)
kuvainfoObjs[-1].filename
kuvainfoObjs[-1].fullpath
kuvainfoObjs[-1].folderpath
kuvainfoObjs[-1].size
kuvainfoObjs[-1].lastmodified
kuvainfoObjs[-1].pictaken
kuvainfoObjs[-1].tags
kuvainfoObjs[-1].title
kuvainfoObjs[-1].pubtags
kuvainfoObjs[-1].pixsize
kuvainfoObjs[-1].volserial
kuvainfoObjs[-1].taggedtime
kuvainfoObjs[-1].unsharp
kuvainfoObjs[-1].pendingdelete
kuvainfoObjs[-1].infosource
kuvainfoObjs[-1].lastmodified
kuvainfoObjs[-1].pictaken
kuvainfoObjs[-1].taggedtime
datetime.strftime(kuvainfoObjs[-1].taggedtime)
self_kuvatHash.keys()[:7]
self_kuvatHash.values()[:7]
len(kuvalistausArr)
infohkeys= self_kuvatHash[self_kuvatHash.keys()[-1]].keys()
infohkeys
infokeysvalueshash={}
for key in infohkeys:
infokeysvalueshash[key]=[]
for key in self_kuvatHash.keys():
key
for infokey in infohkeys:
if infokey in self_kuvatHash[key].keys():
infokeysvalueshash[infokey].append( self_kuvatHash[key][infokey])
infokeysvalueshash.keys()
len( infokeysvalueshash.keys())
infokeysvalueshash[infokeysvalueshash.keys()[0]][:9]
key=infokeysvalueshash.keys()[10]
key
valuelist= infokeysvalueshash[key]
len(valuelist)
valuelist[:9]
set( valuelist)
len( set( valuelist))
len( [valitem for valitem in valuelist if re.findall(r'\d+X\d+',valitem)!=[]])
len( [valitem for valitem in valuelist if re.findall(r'\d+(?:kb)?',valitem)!=[]])
len( [valitem for valitem in valuelist if re.findall(r'(DSC)|(IMG_)',valitem)!=[]])
[valitem for valitem in valuelist if re.findall(r'(DSC)|(IMG_)',valitem)==[]]
len( [valitem for valitem in valuelist if re.findall(r'[A-Z0-9]+\-[A-Z0-9]+',valitem)!=[]])
len( [valitem for valitem in valuelist if re.findall(r'\d+[\-\.]\d+[\-\.]\d+\s+\d+:\d+:\d+',valitem)!=[]])
len( [valitem for valitem in valuelist if re.findall(r'\d+[\-\.:]\d+[\-\.:]\d+\s+\d+:\d+:\d+',valitem)!=[]])
len( [valitem for valitem in valuelist if re.findall(r'\d+[\-\.]\d+[\-\.]\d+(?:\s+\d+:\d+:\d+)?',valitem)!=[]])
|
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import classifier
class MergedTest(unittest.TestCase):
def test_merged(self):
self.assertEqual(classifier.get_merged(zip('abcd', [
{'issue': {'n': 1, 'a': 2}},
{'pull_request': {'n': 2, 'b': 3}},
{'c': 4},
{'issue': {'n': 3, 'd': 4},
'pull_request': {'n': 4, 'e': 5}}
], [0] * 4)), {'n': 4, 'a': 2, 'b': 3, 'd': 4, 'e': 5})
def diffs_to_events(*diffs):
events = []
for diff in diffs:
label = {'name': diff[1:], 'color': '#fff'}
        if diff[0] == '+':
            action = 'labeled'
        elif diff[0] == '-':
            action = 'unlabeled'
        else:
            raise ValueError('diff must start with + or -: %r' % diff)
events.append(('pull_request',
{'action': action,
'label': label}, 0))
return events
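# For example, diffs_to_events('+lgtm', '-approved') yields two synthetic
# 'pull_request' events: a 'labeled' event adding the 'lgtm' label and an
# 'unlabeled' event removing 'approved', each with timestamp 0.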
class LabelsTest(unittest.TestCase):
def expect_labels(self, events, names, extra_events=None):
labels = classifier.get_labels(events)
if extra_events:
labels = classifier.get_labels(extra_events, labels)
self.assertEqual(sorted(labels.keys()), sorted(names))
def test_empty(self):
self.expect_labels([('comment', {'body': 'no labels here'}, 0)], [])
def test_colors(self):
self.assertEqual(classifier.get_labels(
[('c', {'issue':
{'labels': [{'name': 'foo', 'color': '#abc'}]}
}, 0)]),
{'foo': '#abc'})
def test_labeled_action(self):
self.expect_labels(diffs_to_events('+a'), ['a'])
self.expect_labels(diffs_to_events('+a', '+a'), ['a'])
self.expect_labels(diffs_to_events('+a', '-a'), [])
self.expect_labels(diffs_to_events('+a', '+b', '-c', '-b'), ['a'])
self.expect_labels(diffs_to_events('+a', '+b', '-c'), ['a'],
extra_events=diffs_to_events('-b'))
def test_issue_overrides_action(self):
labels = [{'name': 'x', 'color': 'y'}]
self.expect_labels(diffs_to_events('+a') +
[('other_event', {'issue': {'labels': labels}}, 0)], ['x'])
def test_labeled_action_missing_label(self):
self.expect_labels([('pull_request', {'action': 'labeled'}, 0)], [])
def make_comment_event(num, name, msg='', event='issue_comment',
action='created', ts=None):
return event, {
'action': action,
'sender': {'login': name},
'comment': {
'id': num,
'user': {'login': name},
'body': msg,
'created_at': ts,
}
}, ts
class CalculateTest(unittest.TestCase):
def test_classify(self):
# A quick integration test to ensure that all the sub-parts are included.
# If this test fails, a smaller unit test SHOULD fail as well.
self.assertEqual(classifier.classify([
('pull_request', {
'pull_request': {
'state': 'open',
'user': {'login': 'a'},
'assignees': [{'login': 'b'}],
'title': 'some fix',
'head': {'sha': 'abcdef'},
'additions': 1,
'deletions': 1,
'milestone': {'title': 'v1.10'},
}
}, 1),
make_comment_event(1, 'k8s-bot',
'failure in https://k8s-gubernator.appspot.com/build/bucket/job/123/', ts=2),
('pull_request', {
'action': 'labeled',
'label': {'name': 'release-note-none', 'color': 'orange'},
}, 3),
make_comment_event(2, 'k8s-merge-robot', '<!-- META={"approvers":["o"]} -->', ts=4),
], status_fetcher={'abcdef': {'e2e': ['failure', None, 'stuff is broken']}}.get
),
(True, True, ['a', 'b', 'o'],
{
'author': 'a',
'approvers': ['o'],
'assignees': ['b'],
'additions': 1,
'deletions': 1,
'attn': {'a': 'fix tests', 'b': 'needs review#0#0', 'o': 'needs approval'},
'title': 'some fix',
'labels': {'release-note-none': 'orange'},
'head': 'abcdef',
'needs_rebase': False,
'status': {'e2e': ['failure', None, 'stuff is broken']},
'xrefs': ['/bucket/job/123'],
'milestone': 'v1.10',
}))
def test_distill(self):
self.assertEqual(classifier.distill_events([
make_comment_event(1, 'a', ts=1),
make_comment_event(2, 'b', ts=2),
make_comment_event(1, 'a', action='deleted', ts=3),
make_comment_event(3, 'c', event='pull_request_review_comment', ts=4),
make_comment_event(4, 'k8s-bot', ts=4),
('pull_request', {'action': 'synchronize', 'sender': {'login': 'auth'}}, 5),
('pull_request', {'action': 'labeled', 'sender': {'login': 'rev'},
'label': {'name': 'lgtm'}}, 6),
]),
[
('comment', 'b', 2),
('comment', 'c', 4),
('push', 'auth', 5),
('label lgtm', 'rev', 6),
])
def test_calculate_attention(self):
def expect(payload, events, expected_attn):
self.assertEqual(classifier.calculate_attention(events, payload),
expected_attn)
def make_payload(author, assignees=None, labels=None, **kwargs):
ret = {'author': author, 'assignees': assignees or [], 'labels': labels or []}
ret.update(kwargs)
return ret
expect(make_payload('alpha', needs_rebase=True), [],
{'alpha': 'needs rebase'})
expect(make_payload('beta', labels={'do-not-merge/release-note-label-needed'}), [],
{'beta': 'needs release-note label'})
expect(make_payload('gamma', status={'ci': ['failure', '', '']}), [],
{'gamma': 'fix tests'})
expect(make_payload('gamma', status={'ci': ['failure', '', '']}),
[('comment', 'other', 1)],
{'gamma': 'address comments#1#1'})
expect(make_payload('delta', ['epsilon']), [],
{'epsilon': 'needs review#0#0'})
expect(make_payload('alpha', ['alpha']), [('comment', 'other', 1)],
{'alpha': 'address comments#1#1'})
expect(make_payload('alpha', approvers=['owner']), [],
{'owner': 'needs approval'})
def test_author_state(self):
def expect(events, result):
self.assertEqual(classifier.get_author_state('author', events),
result)
expect([], ('waiting', 0, 0))
expect([('comment', 'author', 1)], ('waiting', 0, 0))
expect([('comment', 'other', 1)], ('address comments', 1, 1))
expect([('comment', 'other', 1), ('push', 'author', 2)], ('waiting', 2, 2))
expect([('comment', 'other', 1), ('comment', 'author', 2)], ('waiting', 2, 2))
expect([('comment', 'other', 1), ('comment', 'other', 2)], ('address comments', 1, 2))
def test_assignee_state(self):
def expect(events, result):
self.assertEqual(classifier.get_assignee_state('me', 'author', events),
result)
expect([], ('needs review', 0, 0))
expect([('comment', 'other', 1)], ('needs review', 0, 0))
expect([('comment', 'me', 1)], ('waiting', 1, 1))
expect([('label lgtm', 'other', 1)], ('needs review', 0, 0))
expect([('label lgtm', 'me', 1)], ('waiting', 1, 1))
expect([('comment', 'me', 1), ('push', 'author', 2)], ('needs review', 2, 2))
expect([('comment', 'me', 1), ('comment', 'author', 2)], ('needs review', 2, 2))
expect([('comment', 'me', 1), ('comment', 'author', 2), ('comment', 'author', 3)],
('needs review', 2, 3))
def test_xrefs(self):
def expect(body, comments, result):
self.assertEqual(result, classifier.get_xrefs(
[{'comment': c} for c in comments], {'body': body}))
def fail(path):
return 'foobar https://k8s-gubernator.appspot.com/build%s asdf' % path
expect(None, [], [])
expect('something', [], [])
expect(fail('/a/b/34/'), [], ['/a/b/34'])
expect(None, [fail('/a/b/34/')], ['/a/b/34'])
expect(fail('/a/b/34/'), [fail('/a/b/34]')], ['/a/b/34'])
expect(fail('/a/b/34/)'), [fail('/a/b/35]')], ['/a/b/34', '/a/b/35'])
def test_reviewers(self):
def expect(events, result):
self.assertEqual(result, classifier.get_reviewers(events))
def mk(*specs):
out = []
for event, action, body in specs:
body = dict(body) # copy
body['action'] = action
out.append((event, body, 0))
return out
expect([], set())
user_a = {'requested_reviewer': {'login': 'a'}}
expect(mk(('pull_request', 'review_requested', user_a)), {'a'})
expect(mk(('pull_request', 'review_request_removed', user_a)), set())
expect(mk(('pull_request', 'review_requested', user_a),
('pull_request', 'review_request_removed', user_a)), set())
expect(mk(('pull_request_review', 'submitted', {'sender': {'login': 'a'}})), {'a'})
def test_approvers(self):
def expect(comment, result):
self.assertEqual(result, classifier.get_approvers([{
'author': 'k8s-merge-robot', 'comment': comment}]))
expect('nothing', [])
expect('before\n<!-- META={approvers:[someone]} -->', ['someone'])
expect('<!-- META={approvers:[someone,else]} -->', ['someone', 'else'])
expect('<!-- META={approvers:[someone,else]} -->', ['someone', 'else'])
# The META format is *supposed* to be JSON, but a recent change broke it.
# Support both formats so it can be fixed in the future.
expect('<!-- META={"approvers":["username"]} -->\n', ['username'])
class CommentsTest(unittest.TestCase):
def test_basic(self):
self.assertEqual(classifier.get_comments([make_comment_event(1, 'aaa', 'msg', ts=2016)]),
[{'id': 1, 'author': 'aaa', 'comment': 'msg', 'timestamp': 2016}])
def test_deleted(self):
self.assertEqual(classifier.get_comments([
make_comment_event(1, 'aaa', 'msg', 2016),
make_comment_event(1, None, None, None, action='deleted'),
make_comment_event(2, '', '', '', action='deleted')]),
[])
def test_edited(self):
self.assertEqual(classifier.get_comments([
make_comment_event(1, 'aaa', 'msg', ts=2016),
make_comment_event(1, 'aaa', 'redacted', ts=2016.1, action='edited')]),
[{'id': 1, 'author': 'aaa', 'comment': 'redacted', 'timestamp': 2016.1}])
if __name__ == '__main__':
unittest.main()
|
|
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.proto.spanner.admin.instance.v1.spanner_instance_admin_pb2 as google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2
import google.longrunning.operations_pb2 as google_dot_longrunning_dot_operations__pb2
import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2
import google.iam.v1.iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
import google.iam.v1.policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
class InstanceAdminStub(object):
"""Cloud Spanner Instance Admin API
The Cloud Spanner Instance Admin API can be used to create, delete,
modify and list instances. Instances are dedicated Cloud Spanner serving
and storage resources to be used by Cloud Spanner databases.
Each instance has a "configuration", which dictates where the
serving resources for the Cloud Spanner instance are located (e.g.,
US-central, Europe). Configurations are created by Google based on
resource availability.
Cloud Spanner billing is based on the instances that exist and their
sizes. After an instance exists, there are no additional
per-database or per-operation charges for use of the instance
(though there may be additional network bandwidth charges).
Instances offer isolation: problems with databases in one instance
will not affect other instances. However, within an instance
databases can affect each other. For example, if one database in an
instance receives a lot of requests and consumes most of the
instance resources, fewer resources are available for other
databases in that instance, and their performance may suffer.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListInstanceConfigs = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.FromString,
)
self.GetInstanceConfig = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.InstanceConfig.FromString,
)
self.ListInstances = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstancesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstancesResponse.FromString,
)
self.GetInstance = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.GetInstanceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.Instance.FromString,
)
self.CreateInstance = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.CreateInstanceRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.UpdateInstance = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.DeleteInstance = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance',
request_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SetIamPolicy = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy',
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.GetIamPolicy = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy',
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
'/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions',
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
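# Client-side usage sketch (illustrative only; a real Cloud Spanner endpoint
# requires an authenticated secure channel rather than this local placeholder):
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = InstanceAdminStub(channel)
#   request = google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest(
#       parent='projects/my-project')
#   configs = stub.ListInstanceConfigs(request)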
class InstanceAdminServicer(object):
"""Cloud Spanner Instance Admin API
The Cloud Spanner Instance Admin API can be used to create, delete,
modify and list instances. Instances are dedicated Cloud Spanner serving
and storage resources to be used by Cloud Spanner databases.
Each instance has a "configuration", which dictates where the
serving resources for the Cloud Spanner instance are located (e.g.,
US-central, Europe). Configurations are created by Google based on
resource availability.
Cloud Spanner billing is based on the instances that exist and their
sizes. After an instance exists, there are no additional
per-database or per-operation charges for use of the instance
(though there may be additional network bandwidth charges).
Instances offer isolation: problems with databases in one instance
will not affect other instances. However, within an instance
databases can affect each other. For example, if one database in an
instance receives a lot of requests and consumes most of the
instance resources, fewer resources are available for other
databases in that instance, and their performance may suffer.
"""
def ListInstanceConfigs(self, request, context):
"""Lists the supported instance configurations for a given project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInstanceConfig(self, request, context):
"""Gets information about a particular instance configuration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListInstances(self, request, context):
"""Lists all instances in the given project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInstance(self, request, context):
"""Gets information about a particular instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateInstance(self, request, context):
"""Creates an instance and begins preparing it to begin serving. The
returned [long-running operation][google.longrunning.Operation]
can be used to track the progress of preparing the new
instance. The instance name is assigned by the caller. If the
named instance already exists, `CreateInstance` returns
`ALREADY_EXISTS`.
Immediately upon completion of this request:
* The instance is readable via the API, with all requested attributes
but no allocated resources. Its state is `CREATING`.
Until completion of the returned operation:
* Cancelling the operation renders the instance immediately unreadable
via the API.
* The instance can be deleted.
* All other attempts to modify the instance are rejected.
Upon completion of the returned operation:
* Billing for all successfully-allocated resources begins (some types
may have lower than the requested levels).
* Databases can be created in the instance.
* The instance's allocated resource levels are readable via the API.
* The instance's state becomes `READY`.
The returned [long-running operation][google.longrunning.Operation] will
have a name of the format `<instance_name>/operations/<operation_id>` and
can be used to track creation of the instance. The
[metadata][google.longrunning.Operation.metadata] field type is
[CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
The [response][google.longrunning.Operation.response] field type is
[Instance][google.spanner.admin.instance.v1.Instance], if successful.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateInstance(self, request, context):
"""Updates an instance, and begins allocating or releasing resources
as requested. The returned [long-running
operation][google.longrunning.Operation] can be used to track the
progress of updating the instance. If the named instance does not
exist, returns `NOT_FOUND`.
Immediately upon completion of this request:
* For resource types for which a decrease in the instance's allocation
has been requested, billing is based on the newly-requested level.
Until completion of the returned operation:
* Cancelling the operation sets its metadata's
[cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins
restoring resources to their pre-request values. The operation
is guaranteed to succeed at undoing all resource changes,
after which point it terminates with a `CANCELLED` status.
* All other attempts to modify the instance are rejected.
* Reading the instance via the API continues to give the pre-request
resource levels.
Upon completion of the returned operation:
* Billing begins for all successfully-allocated resources (some types
may have lower than the requested levels).
* All newly-reserved resources are available for serving the instance's
tables.
* The instance's new resource levels are readable via the API.
The returned [long-running operation][google.longrunning.Operation] will
have a name of the format `<instance_name>/operations/<operation_id>` and
can be used to track the instance modification. The
[metadata][google.longrunning.Operation.metadata] field type is
[UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
The [response][google.longrunning.Operation.response] field type is
[Instance][google.spanner.admin.instance.v1.Instance], if successful.
Authorization requires `spanner.instances.update` permission on
resource [name][google.spanner.admin.instance.v1.Instance.name].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteInstance(self, request, context):
"""Deletes an instance.
Immediately upon completion of the request:
* Billing ceases for all of the instance's reserved resources.
Soon afterward:
* The instance and *all of its databases* immediately and
irrevocably disappear from the API. All data in the databases
is permanently deleted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetIamPolicy(self, request, context):
"""Sets the access control policy on an instance resource. Replaces any
existing policy.
Authorization requires `spanner.instances.setIamPolicy` on
[resource][google.iam.v1.SetIamPolicyRequest.resource].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIamPolicy(self, request, context):
"""Gets the access control policy for an instance resource. Returns an empty
policy if an instance exists but does not have a policy set.
Authorization requires `spanner.instances.getIamPolicy` on
[resource][google.iam.v1.GetIamPolicyRequest.resource].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TestIamPermissions(self, request, context):
"""Returns permissions that the caller has on the specified instance resource.
Attempting this RPC on a non-existent Cloud Spanner instance resource will
result in a NOT_FOUND error if the user has `spanner.instances.list`
permission on the containing Google Cloud Project. Otherwise returns an
empty set of permissions.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InstanceAdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListInstanceConfigs': grpc.unary_unary_rpc_method_handler(
servicer.ListInstanceConfigs,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.SerializeToString,
),
'GetInstanceConfig': grpc.unary_unary_rpc_method_handler(
servicer.GetInstanceConfig,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.InstanceConfig.SerializeToString,
),
'ListInstances': grpc.unary_unary_rpc_method_handler(
servicer.ListInstances,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstancesRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.ListInstancesResponse.SerializeToString,
),
'GetInstance': grpc.unary_unary_rpc_method_handler(
servicer.GetInstance,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.GetInstanceRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.Instance.SerializeToString,
),
'CreateInstance': grpc.unary_unary_rpc_method_handler(
servicer.CreateInstance,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.CreateInstanceRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'UpdateInstance': grpc.unary_unary_rpc_method_handler(
servicer.UpdateInstance,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'DeleteInstance': grpc.unary_unary_rpc_method_handler(
servicer.DeleteInstance,
request_deserializer=google_dot_cloud_dot_proto_dot_spanner_dot_admin_dot_instance_dot_v1_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'SetIamPolicy': grpc.unary_unary_rpc_method_handler(
servicer.SetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
'GetIamPolicy': grpc.unary_unary_rpc_method_handler(
servicer.GetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
'TestIamPermissions': grpc.unary_unary_rpc_method_handler(
servicer.TestIamPermissions,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.spanner.admin.instance.v1.InstanceAdmin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
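# Server-side wiring sketch (hypothetical servicer subclass and port):
#
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_InstanceAdminServicer_to_server(MyInstanceAdminServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()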
|
|
"""
Copyright (c) 2016-2018 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
data aggregator loader
"""
import os
import sys
import glob
import syslog
import datetime
import sqlite3
from lib.aggregate import convert_timestamp
sqlite3.register_converter('timestamp', convert_timestamp)
class BaseFlowAggregator(object):
# target location ('<store>.sqlite')
target_filename = None
# list of fields to use in this aggregate
agg_fields = None
@classmethod
def resolutions(cls):
""" sample resolutions for this aggregation
:return: list of sample resolutions
"""
return list()
@classmethod
def history_per_resolution(cls):
""" history to keep in seconds per sample resolution
:return: dict sample resolution / expire time (seconds)
"""
return dict()
@classmethod
def seconds_per_day(cls, days):
"""
:param days: number of days
:return: number of seconds
"""
return 60*60*24*days
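    # Concrete aggregators are expected to fill in the class attributes above.
    # An illustrative sketch (names and values are hypothetical, not taken from
    # this file):
    #
    #   class FlowSourceAggregator(BaseFlowAggregator):
    #       target_filename = 'src_addr_%06d.sqlite'   # '%' expanded with the resolution
    #       agg_fields = ['if', 'direction', 'src_addr']
    #
    #       @classmethod
    #       def resolutions(cls):
    #           return [300, 3600]
    #
    #       @classmethod
    #       def history_per_resolution(cls):
    #           return {300: cls.seconds_per_day(2), 3600: cls.seconds_per_day(31)}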
def __init__(self, resolution, database_dir='/var/netflow'):
""" construct new flow sample class
:return: None
"""
self.database_dir = database_dir
self.resolution = resolution
# target table name, data_<resolution in seconds>
self._db_connection = None
self._update_cur = None
self._known_targets = list()
# construct update and insert sql statements
tmp = 'update timeserie set last_seen = :flow_end, '
tmp += 'octets = octets + :octets_consumed, packets = packets + :packets_consumed '
tmp += 'where mtime = :mtime and %s '
self._update_stmt = tmp % (' and '.join(['%s = :%s' % (x, x) for x in self.agg_fields]))
tmp = 'insert into timeserie (mtime, last_seen, octets, packets, %s) '
tmp += 'values (:mtime, :flow_end, :octets_consumed, :packets_consumed, %s)'
self._insert_stmt = tmp % (','.join(self.agg_fields), ','.join([':%s' % x for x in self.agg_fields]))
# open database
self._open_db()
self._fetch_known_targets()
def __del__(self):
""" close database on destruct
:return: None
"""
if self._db_connection is not None:
self._db_connection.close()
def _fetch_known_targets(self):
""" read known target table names from the sqlite db
:return: None
"""
if self._db_connection is not None:
self._known_targets = list()
cur = self._db_connection.cursor()
cur.execute('SELECT name FROM sqlite_master')
for record in cur.fetchall():
self._known_targets.append(record[0])
cur.close()
def _create_target_table(self):
""" construct target aggregate table, using resulution and list of agg_fields
:return: None
"""
if self._db_connection is not None:
# construct new aggregate table
sql_text = list()
sql_text.append('create table timeserie ( ')
sql_text.append(' mtime timestamp')
sql_text.append(', last_seen timestamp')
for agg_field in self.agg_fields:
sql_text.append(', %s varchar(255)' % agg_field)
sql_text.append(', octets numeric')
sql_text.append(', packets numeric')
sql_text.append(', primary key(mtime, %s)' % ','.join(self.agg_fields))
sql_text.append(')')
cur = self._db_connection.cursor()
cur.executescript('\n'.join(sql_text))
cur.close()
# read table names
self._fetch_known_targets()
def is_db_open(self):
""" check if target database is open
:return: database connected (True/False)
"""
if self._db_connection is not None:
return True
else:
return False
def _open_db(self):
""" open / create database
:return: None
"""
if self.target_filename is not None:
# make sure the target directory exists
if not os.path.isdir(self.database_dir):
os.makedirs(self.database_dir)
# open sqlite database
self._db_connection = sqlite3.connect(
("%s/%s" % (self.database_dir, self.target_filename)) % self.resolution, timeout=60,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
)
# open update/insert cursor
self._update_cur = self._db_connection.cursor()
def commit(self):
""" commit data
:return: None
"""
if self._db_connection is not None:
self._db_connection.commit()
def add(self, flow):
""" calculate timeslices per flow depending on sample resolution
:param flow: flow data (from parse.py)
:return: None
"""
# make sure target exists
if 'timeserie' not in self._known_targets:
self._create_target_table()
# push record(s) depending on resolution
start_time = int(flow['flow_start'] / self.resolution) * self.resolution
while start_time <= flow['flow_end']:
consume_start_time = max(flow['flow_start'], start_time)
consume_end_time = min(start_time + self.resolution, flow['flow_end'])
if flow['duration_ms'] != 0:
consume_perc = (consume_end_time - consume_start_time) / float(flow['duration_ms'] / 1000.0)
else:
consume_perc = 1
if self.is_db_open():
# upsert data
flow['octets_consumed'] = consume_perc * flow['octets']
flow['packets_consumed'] = consume_perc * flow['packets']
flow['mtime'] = datetime.datetime.utcfromtimestamp(start_time)
self._update_cur.execute(self._update_stmt, flow)
if self._update_cur.rowcount == 0:
self._update_cur.execute(self._insert_stmt, flow)
# next start time
start_time += self.resolution
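        # Worked example of the slicing above (illustrative numbers): with a
        # resolution of 300 seconds, a flow with flow_start=290, flow_end=610
        # and duration_ms=320000 touches the buckets starting at 0, 300 and 600;
        # they are charged 10/320, 300/320 and 10/320 of the flow's octets and
        # packets respectively.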
def cleanup(self, do_vacuum=False):
""" cleanup timeserie table
:param do_vacuum: vacuum database
:return: None
"""
if self.is_db_open() and 'timeserie' in self._known_targets \
and self.resolution in self.history_per_resolution():
self._update_cur.execute('select max(mtime) as "[timestamp]" from timeserie')
last_timestamp = self._update_cur.fetchall()[0][0]
if type(last_timestamp) == datetime.datetime:
expire = self.history_per_resolution()[self.resolution]
expire_timestamp = last_timestamp - datetime.timedelta(seconds=expire)
if last_timestamp > datetime.datetime.now():
# if data recorded seems to be in the future, use current timestamp for cleanup
# (prevent current data being removed)
expire_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=expire)
self._update_cur.execute('delete from timeserie where mtime < :expire', {'expire': expire_timestamp})
self.commit()
if do_vacuum:
# vacuum database if requested
syslog.syslog(syslog.LOG_NOTICE, 'vacuum %s' % (self.target_filename % self.resolution))
self._update_cur.execute('vacuum')
@staticmethod
def _parse_timestamp(timestamp):
""" convert input to datetime.datetime or return if it already was of that type
:param timestamp: timestamp to convert
:return: datetime.datetime object
"""
if type(timestamp) in (int, float):
return datetime.datetime.utcfromtimestamp(timestamp)
elif type(timestamp) != datetime.datetime:
return datetime.datetime.utcfromtimestamp(0)
else:
return timestamp
def _valid_fields(self, fields):
""" cleanse fields (return only valid ones)
:param fields: field list
:return: list
"""
# validate field list (can only select fields in self.agg_fields)
select_fields = list()
for field in fields:
if field.strip() in self.agg_fields:
select_fields.append(field.strip())
return select_fields
def get_timeserie_data(self, start_time, end_time, fields):
""" fetch data from aggregation source, groups by mtime and selected fields
:param start_time: start timestamp
:param end_time: end timestamp
:param fields: fields to retrieve
:return: iterator returning dict records (start_time, end_time, [fields], octets, packets)
"""
if self.is_db_open() and 'timeserie' in self._known_targets:
# validate field list (can only select fields in self.agg_fields)
select_fields = self._valid_fields(fields)
if len(select_fields) == 0:
# select "none", add static null as field
select_fields.append('null')
sql_select = 'select mtime as "start_time [timestamp]", %s' % ','.join(select_fields)
sql_select += ', sum(octets) as octets, sum(packets) as packets\n'
sql_select += 'from timeserie \n'
sql_select += 'where mtime >= :start_time and mtime < :end_time\n'
sql_select += 'group by mtime, %s\n' % ','.join(select_fields)
# execute select query
cur = self._db_connection.cursor()
cur.execute(sql_select, {'start_time': self._parse_timestamp(start_time),
'end_time': self._parse_timestamp(end_time)})
#
field_names = ([x[0] for x in cur.description])
for record in cur.fetchall():
result_record = dict()
for field_indx in range(len(field_names)):
if len(record) > field_indx:
if type(record[field_indx]) == bytes:
result_record[field_names[field_indx]] = record[field_indx].decode()
else:
result_record[field_names[field_indx]] = record[field_indx]
if 'start_time' in result_record:
result_record['end_time'] = result_record['start_time'] \
+ datetime.timedelta(seconds=self.resolution)
# send data
yield result_record
# close cursor
cur.close()
def get_top_data(self, start_time, end_time, fields, value_field, data_filters=None, max_hits=100):
""" Retrieve top (usage) from this aggregation.
Fetch data from aggregation source, groups by selected fields, sorts by value_field descending
use data_filter to filter before grouping.
:param start_time: start timestamp
:param end_time: end timestamp
:param fields: fields to retrieve
:param value_field: field to sum
:param data_filters: filter data, use as field=value
:param max_hits: maximum number of results, rest is summed into (other)
        :return: list of dict records ([fields], total, last_seen), at most max_hits rows plus one aggregated "(other)" row
"""
result = list()
if self.is_db_open() and 'timeserie' in self._known_targets:
select_fields = self._valid_fields(fields)
filter_fields = []
query_params = {}
if value_field == 'octets':
value_sql = 'sum(octets)'
elif value_field == 'packets':
value_sql = 'sum(packets)'
else:
value_sql = '0'
# query filters, correct start_time for resolution
query_params['start_time'] = self._parse_timestamp((int(start_time/self.resolution))*self.resolution)
query_params['end_time'] = self._parse_timestamp(end_time)
if data_filters:
for data_filter in data_filters.split(','):
tmp = data_filter.split('=')[0].strip()
if tmp in self.agg_fields and data_filter.find('=') > -1:
filter_fields.append(tmp)
query_params[tmp] = '='.join(data_filter.split('=')[1:])
if len(select_fields) > 0:
# construct sql query to filter and select data
sql_select = 'select %s' % ','.join(select_fields)
sql_select += ', %s as total, max(last_seen) last_seen \n' % value_sql
sql_select += 'from timeserie \n'
sql_select += 'where mtime >= :start_time and mtime < :end_time\n'
for filter_field in filter_fields:
sql_select += ' and %s = :%s \n' % (filter_field, filter_field)
sql_select += 'group by %s\n' % ','.join(select_fields)
sql_select += 'order by %s desc ' % value_sql
# execute select query
cur = self._db_connection.cursor()
cur.execute(sql_select, query_params)
# fetch all data, to a max of [max_hits] rows.
field_names = ([x[0] for x in cur.description])
for record in cur.fetchall():
result_record = dict()
for field_indx in range(len(field_names)):
if len(record) > field_indx:
result_record[field_names[field_indx]] = record[field_indx]
if len(result) < max_hits:
result.append(result_record)
else:
if len(result) == max_hits:
# generate row for "rest of data"
result.append({'total': 0})
for key in result_record:
if key not in result[-1]:
result[-1][key] = ""
result[-1]['total'] += result_record['total']
# close cursor
cur.close()
return result
def get_data(self, start_time, end_time):
""" get detail data
:param start_time: start timestamp
:param end_time: end timestamp
:return: iterator
"""
if self.is_db_open() and 'timeserie' in self._known_targets:
query_params = dict()
query_params['start_time'] = self._parse_timestamp((int(start_time/self.resolution))*self.resolution)
query_params['end_time'] = self._parse_timestamp(end_time)
sql_select = 'select mtime start_time, '
sql_select += '%s, octets, packets, last_seen as "last_seen [timestamp]" \n' % ','.join(self.agg_fields)
sql_select += 'from timeserie \n'
sql_select += 'where mtime >= :start_time and mtime < :end_time\n'
cur = self._db_connection.cursor()
cur.execute(sql_select, query_params)
            # stream all rows; unlike get_top_data there is no max_hits limit here
field_names = ([x[0] for x in cur.description])
while True:
record = cur.fetchone()
if record is None:
break
else:
result_record = dict()
for field_indx in range(len(field_names)):
if len(record) > field_indx:
result_record[field_names[field_indx]] = record[field_indx]
yield result_record
def get_aggregators():
""" collect and return available aggregators
:return: list of class references
"""
result = list()
for filename in glob.glob('%s/*.py' % os.path.dirname(__file__)):
filename_base = os.path.basename(filename)
if filename_base[0:2] != '__':
module_name = 'lib.aggregates.%s' % '.'.join(filename_base.split('.')[:-1])
__import__(module_name)
for clsname in dir(sys.modules[module_name]):
clshandle = getattr(sys.modules[module_name], clsname)
if type(clshandle) == type and issubclass(clshandle, BaseFlowAggregator):
if hasattr(clshandle, 'target_filename') and clshandle.target_filename is not None:
result.append(clshandle)
return result
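# Minimal discovery sketch (illustrative only; the concrete aggregator subclasses live in
# the sibling modules of this package, so the `resolutions` attribute and the constructor
# argument used below are assumptions about those subclasses, not part of this module):
#
#   for agg_class in get_aggregators():
#       for resolution in agg_class.resolutions:
#           aggregator = agg_class(resolution)
#           ...
#
# get_aggregators() only returns BaseFlowAggregator subclasses that define a
# target_filename, i.e. the ones backed by their own sqlite database file.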
|
|
#!/usr/bin/python
#-*-coding:utf-8-*-
#By Giggle Liu
from numpy import *
from lattice import Lattice
from bzone import BZone,KSpace  # NOTE: KSpace is used by Chain.kspace below; assumed to live alongside BZone
from group import C6vGroup,C4vGroup,C3vGroup
__all__=['Honeycomb_Lattice','Square_Lattice','Triangular_Lattice','Chain','construct_lattice','resize_lattice']
class Honeycomb_Lattice(Lattice):
'''
HoneyComb Lattice class.
Construct
----------------
Honeycomb_Lattice(N,form=1.)
form:
The form of lattice.
`1` -> traditional one with 0 point at a vertex, using C3v group.
`2` -> the C6v form with 0 point at the center of hexagon, using C6v group.
'''
def __init__(self,N,form=1):
if form==1:
catoms=[(0.,0.),(0.5,sqrt(3.)/6)]
pg=C3vGroup()
elif form==2:
            catoms=[(0.,1./sqrt(3.)),(0.5,sqrt(3.)/6)]
pg=C6vGroup()
else:
raise ValueError('Form %s not defined.'%form)
super(Honeycomb_Lattice,self).__init__(name='honeycomb',a=array([(1.,0),(0.5,sqrt(3.)/2)]),N=N,catoms=catoms)
self.usegroup(pg)
@property
def kspace(self):
'''
Get the <KSpace> instance.
'''
ks=super(Honeycomb_Lattice,self).kspace
M0=ks.b[1]/2.0
K0=(ks.b[0]+2*ks.b[1])/3.0
c6vg=C6vGroup()
M=[]
K=[]
for i in xrange(6):
M.append(c6vg.actonK(M0,i))
K.append(c6vg.actonK(K0,i))
ks.special_points['M']=M
ks.special_points['K']=K
ks.usegroup(c6vg)
return ks
class Square_Lattice(Lattice):
'''
Square Lattice, using C4v Group.
Construct
----------------
Square_Lattice(N,catoms=[(0.,0.)])
'''
def __init__(self,N,catoms=[(0.,0.)]):
a=array([(1.,0),(0.,1.)])
super(Square_Lattice,self).__init__(N=N,a=a,catoms=catoms,name='square')
c4vg=C4vGroup()
self.usegroup(c4vg)
@property
def kspace(self):
'''
Get the <KSpace> instance.
'''
ks=super(Square_Lattice,self).kspace
M0=ks.b[1]/2.0
K0=(ks.b[0]+ks.b[1])/2.0
c4vg=C4vGroup()
M=[]
K=[]
for i in xrange(4):
M.append(c4vg.actonK(M0,i))
K.append(c4vg.actonK(K0,i))
ks.special_points['M']=M
ks.special_points['K']=K
ks.usegroup(c4vg)
return ks
class Triangular_Lattice(Lattice):
'''
Triangular Lattice, using C6v Group.
Construct
----------------
Triangular_Lattice(N,catoms=[(0.,0.)])
'''
def __init__(self,N,catoms=[(0.,0.)]):
'''Basic information of Triangular Lattice'''
a=array([(1.,0),(0.5,sqrt(3.)/2)])
super(Triangular_Lattice,self).__init__(a=a,catoms=catoms,name='triangular',N=N)
c6vg=C6vGroup()
self.usegroup(c6vg)
@property
def kspace(self):
'''
Get the <KSpace> instance.
'''
ks=super(Triangular_Lattice,self).kspace
M0=ks.b[1]/2.0
K0=(ks.b[0]+2*ks.b[1])/3.0
c6vg=C6vGroup()
M=[]
K=[]
for i in xrange(6):
M.append(c6vg.actonK(M0,i))
K.append(c6vg.actonK(K0,i))
ks.special_points['M']=M
ks.special_points['K']=K
ks.usegroup(c6vg)
return ks
class Chain(Lattice):
'''
Lattice of Chain.
Construct
----------------
    Chain(N,a=(1.),catoms=[(0.)])
'''
def __init__(self,N,a=(1.),catoms=[(0.)]):
'''
N:
Number of cells, integer.
a:
Lattice vector, 1D array.
catoms:
Atom positions in a unit cell.
'''
super(Chain,self).__init__(a=[a],N=[N],name='chain',catoms=catoms)
@property
def kspace(self):
'''The <KSpace> instance correspond to a chain.'''
a=self.a[0]
b=2*pi*a/a.dot(a)
ks=KSpace(N=self.N,b=b)
ks.special_points['K']=array([-b/2.,b/2.])
return ks
def construct_lattice(N,lattice_shape='',a=None,catoms=None,args={}):
'''
Uniform construct method for lattice.
N:
The size of lattice.
lattice_shape:
The shape of lattice.
* '' -> the anonymous lattice.
* 'square' -> square lattice.
* 'honeycomb' -> honeycomb lattice.
* 'triangular' -> triangular lattice.
* 'chain' -> a chain.
a:
The unit vector.
catoms:
The atoms in a unit cell.
args:
Other arguments,
* `form` -> the form used in constructing honeycomb lattice.
'''
if lattice_shape=='':
assert(a is not None)
if catoms is None: catoms=zeros(shape(a)[-1])
return Lattice(name='anonymous',N=N,a=a,catoms=catoms)
elif lattice_shape=='honeycomb':
return Honeycomb_Lattice(N=N,form=args.get('form',1))
elif lattice_shape=='square':
if catoms is None: catoms=zeros([1,2])
return Square_Lattice(N=N,catoms=catoms)
elif lattice_shape=='triangular':
if catoms is None: catoms=zeros([1,2])
return Triangular_Lattice(N=N,catoms=catoms)
elif lattice_shape=='chain':
if a is None: a=[1.]
if catoms is None: catoms=zeros([1,1])
if ndim(N)==1:
N=N[0]
return Chain(N=N,catoms=catoms)
def resize_lattice(lattice,N):
'''
Resize the lattice to specific size.
lattice:
The target lattice.
N:
        1D array, the size of the new lattice.
'''
return construct_lattice(a=lattice.a,N=N,catoms=lattice.catoms,args={'form':getattr(lattice,'form',None)})
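# Usage sketch (illustrative; N follows the per-direction cell-count convention of Lattice):
#
#   lt = construct_lattice(N=[4, 4], lattice_shape='honeycomb', args={'form': 1})
#   ks = lt.kspace                       # KSpace with the 'M' and 'K' special points filled in
#   lt2 = resize_lattice(lt, [8, 8])     # same a and catoms, different size
#
# Note that resize_lattice rebuilds the lattice through construct_lattice without passing
# lattice_shape, so the result is an anonymous Lattice that keeps the original geometry
# (a, catoms) but not the point group attached by the shaped constructors.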
|
|
from django.core.urlresolvers import reverse
from django.forms.models import modelformset_factory
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotAllowed
from django.views.generic import list_detail, simple
from django.views.generic.create_update import apply_extra_context
from vt_manager_kvm.models import *
from vt_manager_kvm.communication.utils.XmlHelper import XmlHelper
import uuid, time, logging
from django.template import loader, RequestContext
from django.core.xheaders import populate_xheaders
from django.contrib import messages
#News
from vt_manager_kvm.controller.drivers.VTDriver import VTDriver
from vt_manager_kvm.utils.HttpUtils import HttpUtils
from vt_manager_kvm.models.NetworkInterface import NetworkInterface
from vt_manager_kvm.models.MacRange import MacRange
from vt_manager_kvm.controller.dispatchers.xmlrpc.InformationDispatcher import InformationDispatcher
from vt_manager_kvm.controller.dispatchers.forms.NetworkInterfaceForm import MgmtBridgeForm
from vt_manager_kvm.controller.dispatchers.forms.ServerForm import ServerForm
from django.db import transaction
def userIsIslandManager(request):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
@transaction.commit_on_success
def servers_crud(request, server_id=None):
"""Show a page for the user to add/edit an VTServer """
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
vmProjects = {}
vmSlices = {}
try:
for vm in VTDriver.getVMsInServer(VTDriver.getServerById(server_id)):
if vm.projectName not in vmProjects:
vmProjects[vm.projectName] = vm.projectId
if vm.sliceName not in vmSlices:
vmSlices[vm.sliceName] = vm.sliceId
except Exception as e:
print e
pass
serverFormClass = HttpUtils.getFormFromModel(VTServer)
ifaceFormClass = HttpUtils.getFormFromModel(NetworkInterface)
IfaceFormSetClass = modelformset_factory(NetworkInterface)
if server_id != None:
server = get_object_or_404(VTServer, pk=server_id)
else:
server = None
if request.method == "GET":
#serverForm = serverFormClass(instance=server)
serverForm = ServerForm(instance=server, prefix ="server")
if server != None:
mgmt = server.getNetworkInterfaces().filter(isMgmt = True)
if mgmt:
mgmt = mgmt.get()
mgmtIfaceForm = MgmtBridgeForm({'mgmtBridge-name':mgmt.getName(), 'mgmtBridge-mac':mgmt.getMacStr()}, prefix ="mgmtBridge")
else:
mgmtIfaceForm = MgmtBridgeForm(prefix ="mgmtBridge")
data = server.getNetworkInterfaces().filter(isMgmt = False)
if data:
IfaceFormSetClass = modelformset_factory(NetworkInterface,extra = 0)
ifaceformset = IfaceFormSetClass(queryset= data)
else:
mgmtIfaceForm = MgmtBridgeForm(prefix ="mgmtBridge")
ifaceformset = IfaceFormSetClass(queryset= NetworkInterface.objects.none())
elif request.method == "POST":
#serverForm = serverFormClass(request.POST, instance=server)
serverForm = ServerForm(request.POST, instance=server, prefix ="server")
ifaceformset = IfaceFormSetClass(request.POST)
mgmtIfaceForm = MgmtBridgeForm(request.POST, prefix ="mgmtBridge")
if serverForm.is_valid() and ifaceformset.is_valid() and mgmtIfaceForm.is_valid():
ifaces = ifaceformset.save(commit = False)
if server == None:
server = serverForm.save(commit = False)
try:
server = VTDriver.crudServerFromInstance(server)
VTDriver.setMgmtBridge(request, server)
VTDriver.crudDataBridgeFromInstance(server, ifaces,request.POST.getlist("DELETE"))
except Exception as e:
print e
e = HttpUtils.processException(e)
context = {"exception":e, "serverForm": serverForm, 'vmProjects': vmProjects, 'vmSlices': vmSlices,'ifaceformset' : ifaceformset, 'mgmtIfaceForm' : mgmtIfaceForm}
if server_id != None: context["server"] = server
return simple.direct_to_template(
request,
template="servers/servers_crud.html",
extra_context=context,
)
            # Redirect back to the servers admin page
return HttpResponseRedirect('/servers/admin/')
else:
return HttpResponseNotAllowed("GET", "POST")
context = {"serverForm": serverForm, 'vmProjects': vmProjects, 'vmSlices': vmSlices,'ifaceformset' : ifaceformset, 'mgmtIfaceForm' : mgmtIfaceForm}
if server_id != None: context["server"] = server
return simple.direct_to_template(
request,
template="servers/servers_crud.html",
extra_context=context,
)
def admin_servers(request):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
servers = VTDriver.getAllServers()
return simple.direct_to_template(
request, template="servers/admin_servers.html",
extra_context={"servers_ids": servers})
def delete_server(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
if request.method == 'POST':
try:
VTDriver.deleteServer(VTDriver.getServerById(server_id))
return HttpResponseRedirect(reverse('dashboard'))
except Exception as e:
logging.error(e)
e = HttpUtils.processException(e)
return simple.direct_to_template(request,
template = 'servers/delete_server.html',
extra_context = {'user':request.user, 'exception':e, 'next':reverse("admin_servers")},
)
elif request.method == 'GET':
return simple.direct_to_template(request,
template = 'servers/delete_server.html',
extra_context = {'user':request.user, 'next':reverse("admin_servers"),'object':VTDriver.getServerById(server_id)},
)
def action_vm(request, server_id, vm_id, action):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
if(action == 'list'):
return simple.direct_to_template(
request, template="servers/server_vm_details.html",
extra_context={"vm": VTDriver.getVMbyId(vm_id), "server_id":server_id}
)
elif(action == 'check_status'):
#XXX: Do this function if needed
return simple.direct_to_template(
request, template="servers/list_vm.html",
extra_context={"vm": VM.objects.get(id = vm_id)}
)
elif(action == 'force_update_server'):
InformationDispatcher.forceListActiveVMs(serverID=server_id)
elif(action == 'force_update_vm'):
InformationDispatcher.forceListActiveVMs(vmID=vm_id)
else:
#XXX: serverUUID should be passed in a different way
VTDriver.PropagateActionToProvisioningDispatcher(vm_id, VTServer.objects.get(id=server_id).uuid, action)
#return HttpResponseRedirect(reverse('edit_server', args = [server_id]))
return HttpResponse("")
def subscribeEthernetRanges(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
macRanges = MacRange.objects.all()
if server_id != None:
server = get_object_or_404(VTServer, pk=server_id)
else:
raise Exception ("NO SERVER")
if request.method == "GET":
return simple.direct_to_template(request,
template = 'servers/servers_subscribeEthernetRanges.html',
extra_context = {'server': server, 'macRanges':macRanges},
)
elif request.method=='POST':
VTDriver.manageEthernetRanges(request,server,macRanges)
return HttpResponseRedirect(reverse('edit_server', args = [server_id]))
else:
        return HttpResponseNotAllowed(["GET", "POST"])
def subscribeIp4Ranges(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
ipRanges = Ip4Range.objects.all()
if server_id != None:
server = get_object_or_404(VTServer, pk=server_id)
else:
raise Exception ("NO SERVER")
if request.method == "GET":
return simple.direct_to_template(request,
template = 'servers/servers_subscribeIp4Ranges.html',
extra_context = {'server': server, 'ipRanges':ipRanges},
)
elif request.method=='POST':
VTDriver.manageIp4Ranges(request,server,ipRanges)
return HttpResponseRedirect(reverse('edit_server', args = [server_id]))
else:
        return HttpResponseNotAllowed(["GET", "POST"])
def list_vms(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
vmProjects = {}
vmSlices = {}
try:
for vm in VTDriver.getVMsInServer(VTDriver.getServerById(server_id)):
if vm.projectName not in vmProjects:
vmProjects[vm.projectName] = vm.projectId
if vm.sliceName not in vmSlices:
vmSlices[vm.sliceName] = vm.sliceId
except Exception as e:
print e
pass
server = get_object_or_404(VTServer, pk=server_id)
context = { 'vmProjects': vmProjects, 'vmSlices': vmSlices,'server':server}
return simple.direct_to_template(
request,
template="servers/servers_list_vms.html",
extra_context=context,
)
'''
Networking point of entry
'''
from vt_manager_kvm.controller.networking.EthernetController import EthernetController
from vt_manager_kvm.controller.networking.Ip4Controller import Ip4Controller
from vt_manager_kvm.models.MacRange import MacRange
NETWORKING_ACTION_ADD="add"
NETWORKING_ACTION_EDIT="edit"
NETWORKING_ACTION_DELETE="delete"
NETWORKING_ACTION_SHOW="show"
NETWORKING_ACTION_ADDEXCLUDED="addExcluded"
NETWORKING_ACTION_REMOVEXCLUDED="removeExcluded"
NETWORKING_POSSIBLE_ACTIONS=(NETWORKING_ACTION_ADD,NETWORKING_ACTION_DELETE,NETWORKING_ACTION_EDIT,NETWORKING_ACTION_SHOW,NETWORKING_ACTION_ADDEXCLUDED,NETWORKING_ACTION_REMOVEXCLUDED,None)
def networkingDashboard(request):#,rangeId):
extra_context = {"section": "networking","subsection":"None"}
extra_context["macRanges"] = EthernetController.listRanges()
extra_context["MacRange"] = MacRange
extra_context["ip4Ranges"] = Ip4Controller.listRanges()
extra_context["Ip4Range"] = Ip4Range
template = "networking/index.html"
return simple.direct_to_template(
request,
extra_context=extra_context,
template=template,
)
def manageIp4(request,rangeId=None,action=None,ip4Id=None):
    if action not in NETWORKING_POSSIBLE_ACTIONS:
raise Exception("Unknown action")
#Define context
extra_context = {"section": "networking","subsection":"ip4"+str(action),}
#Add process
if (action == NETWORKING_ACTION_ADD):
if request.method == "GET":
#Show form
extra_context["form"] = HttpUtils.getFormFromModel(Ip4Range)
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeCrud.html",
)
return
# return HttpResponseRedirect("/networking/ip4/")
elif request.method == "POST":
try:
instance = HttpUtils.getInstanceFromForm(request,Ip4Range)
#Create Range
Ip4Controller.createRange(instance)
return HttpResponseRedirect("/networking/ip4/")
except Exception as e:
print e
extra_context["form"] = HttpUtils.processExceptionForm(e,request,Ip4Range)
#Process creation query
#return HttpResponseRedirect("/networking/ip4/")
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeCrud.html",
)
#Show
    if (action is None or action == NETWORKING_ACTION_SHOW) and rangeId is not None:
instance = Ip4Controller.getRange(rangeId)
extra_context["range"] = instance
#return HttpResponseRedirect("/networking/ip4/")
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeDetail.html",
)
#Edit
#TODO
#Add excluded Ip
if (action == NETWORKING_ACTION_ADDEXCLUDED) and (request.method == "POST"):
if not request.method == "POST":
raise Exception("Invalid method")
try:
instance = Ip4Controller.getRange(rangeId)
extra_context["range"] = instance
#Create excluded
Ip4Controller.addExcludedIp4(instance,request)
return HttpResponseRedirect("/networking/ip4/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeDetail.html",
)
#Release excluded Ip
if (action == NETWORKING_ACTION_REMOVEXCLUDED) and (request.method == "POST"):
try:
instance = Ip4Controller.getRange(rangeId)
#Create excluded
Ip4Controller.removeExcludedIp4(instance,ip4Id)
#FIXME: Why initial instance is not refreshed?
instance = Ip4Controller.getRange(rangeId)
extra_context["range"] = instance
return HttpResponseRedirect("/networking/ip4/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeDetail.html",
)
#Delete
if (action == NETWORKING_ACTION_DELETE) and (request.method == "POST"):
try:
Ip4Controller.deleteRange(rangeId)
return HttpResponseRedirect("/networking/ip4/")
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
extra_context["ranges"] = Ip4Controller.listRanges()
template = "networking/ip4/index.html"
return simple.direct_to_template(
request,
extra_context = extra_context,
template=template,
)
def manageEthernet(request,rangeId=None,action=None,macId=None):
    if action not in NETWORKING_POSSIBLE_ACTIONS:
raise Exception("Unknown action")
#Define context
extra_context = {"section": "networking","subsection":"ethernet",}
#Add process
if (action == NETWORKING_ACTION_ADD):
if request.method == "GET":
#Show form
extra_context["form"] = HttpUtils.getFormFromModel(MacRange)
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeCrud.html",
)
return
elif request.method == "POST":
try:
instance = HttpUtils.getInstanceFromForm(request,MacRange)
#Create Range
EthernetController.createRange(instance)
return HttpResponseRedirect("/networking/ethernet/")
except Exception as e:
print e
extra_context["form"] = HttpUtils.processExceptionForm(e,request,MacRange)
#Process creation query
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeCrud.html",
)
#Show
    if (action is None or action == NETWORKING_ACTION_SHOW) and rangeId is not None:
instance = EthernetController.getRange(rangeId)
extra_context["range"] = instance
#return HttpResponseRedirect("/networking/ethernet/")
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeDetail.html",
)
#Edit
#TODO
#Add excluded Mac
if (action == NETWORKING_ACTION_ADDEXCLUDED) and (request.method == "POST"):
if not request.method == "POST":
raise Exception("Invalid method")
try:
instance = EthernetController.getRange(rangeId)
extra_context["range"] = instance
#Create excluded
EthernetController.addExcludedMac(instance,request)
return HttpResponseRedirect("/networking/ethernet/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeDetail.html",
)
#Release excluded Mac
if (action == NETWORKING_ACTION_REMOVEXCLUDED) and (request.method == "POST"):
try:
instance = EthernetController.getRange(rangeId)
#Create excluded
#FIXME: Why initial instance is not refreshed?
EthernetController.removeExcludedMac(instance,macId)
instance = EthernetController.getRange(rangeId)
extra_context["range"] = instance
return HttpResponseRedirect("/networking/ethernet/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeDetail.html",
)
#Delete
if (action == NETWORKING_ACTION_DELETE) and (request.method == "POST"):
try:
EthernetController.deleteRange(rangeId)
return HttpResponseRedirect("/networking/ethernet/")
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
#Listing ranges
extra_context["ranges"] = EthernetController.listRanges()
return simple.direct_to_template(
request,
extra_context = extra_context,
template = "networking/ethernet/index.html",
)
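#
# Hypothetical URL wiring for the networking views above, shown only to illustrate how
# rangeId / action / macId / ip4Id map onto manageEthernet and manageIp4; the URL prefixes
# and regexes are assumptions, not this project's actual urls.py:
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('',
#       url(r'^networking/$', networkingDashboard),
#       url(r'^networking/ethernet/$', manageEthernet),
#       url(r'^networking/ethernet/(?P<rangeId>\d+)/$', manageEthernet),
#       url(r'^networking/ethernet/(?P<rangeId>\d+)/(?P<action>\w+)/(?:(?P<macId>\d+)/)?$', manageEthernet),
#       url(r'^networking/ip4/(?P<rangeId>\d+)/(?P<action>\w+)/(?:(?P<ip4Id>\d+)/)?$', manageIp4),
#   )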
|