#!/usr/bin/python
"""
Usage: ./pdbqt.py <old.pdbqt> <old.mol2> <new.mol2>
"""
def randString():
from random import choice
import string
chars = string.letters + string.digits
newstring = ''
for i in range(8):
newstring += choice(chars)
return newstring
class pdbqt2mol2:
"""
Loads coordinates (and the Vina score, if present) from a PDBQT file into the original mol2 and writes a new mol2 file.
"""
def __init__(self,oldPDBQTfilename,oldmol2filename,newmol2filename):
import os,tempfile,sys,shutil
tempmol2filename = '/tmp/pdbqt2mol2_'+randString()
PDBQTlines = open(oldPDBQTfilename).readlines()
atomlocations = {} # new atom coordinates
remapatoms = {} # new atom number, for bond remap
adscore = ''
for line in PDBQTlines:
if line.startswith('REMARK VINA RESULT:'):
adscore,rmsd1,rmsd2 = line.split(':')[1].split()[:3]
if line.find('ATOM') == 0:
atomname = line[11:16].split()[0]
xcoord = line[30:38].split()[0]
ycoord = line[38:46].split()[0]
zcoord = line[46:54].split()[0]
atomlocations[atomname] = [xcoord, ycoord, zcoord]
oldmol2 = open(oldmol2filename)
newmol2 = open(tempmol2filename, 'w')
atomnum = 0
bondnum = 0
counter = 0
start_mol = False
while 1:
if counter == 0 and adscore != '':
print >> newmol2,'### USER Estimated Free Energy of Binding = %s kcal/mol'%(adscore,)
counter += 1
line = oldmol2.readline()
if line.startswith('@<TRIPOS>MOLECULE'): start_mol = True
if start_mol:
newmol2.write(line)
else:
continue
if line.startswith('@<TRIPOS>ATOM'): break
oldFilePos = 'New'
newFilePos = oldmol2.tell()
while oldFilePos != newFilePos:
line = oldmol2.readline()
oldFilePos = newFilePos
newFilePos = oldmol2.tell()
if oldFilePos == newFilePos: break
if line.find('<TRIPOS>BOND') != -1:
newmol2.write(line)
break
contents = line.split()
# common @<TRIPOS>ATOM line:
# 1 S1 6.5100 -2.3824 0.4691 S.o2 1 LIG 0.8327
# []0 1 2 3 4 5 6 7 8
try:
newcoords = atomlocations[contents[1]]
atomnum += 1
remapatoms[contents[0]] = str(atomnum)
modline = ' ' + ' '.join([str(atomnum), contents[1],
newcoords[0], newcoords[1],
newcoords[2], contents[5],
contents[6], contents[7],
contents[8]]) + '\n'
llist = modline.split()
nxx = float(llist[2])
nyy = float(llist[3])
nzz = float(llist[4])
achg = float(llist[8])
newmol2.write("%7s %-8s%10.4f%10.4f%10.4f %-8s%3s %-8s%10.4f\n"%(llist[0],llist[1],nxx,nyy,nzz,llist[5],llist[6],llist[7],achg))
except KeyError:
continue
while oldFilePos != newFilePos:
line = oldmol2.readline()
oldFilePos = newFilePos
newFilePos = oldmol2.tell()
if oldFilePos == newFilePos: break
if line.find('<TRIPOS>SUBSTRUCTURE') != -1:
newmol2.write(line)
break
try:
bondfirst = line.split()[1]
bondsecond = line.split()[2]
except IndexError:
newmol2.write(line)
continue
# common @<TRIPOS>BOND line:
# 5 4 6 ar
# []0 1 2 3
try:
newfirst = remapatoms[bondfirst]
newsecond = remapatoms[bondsecond]
except KeyError:
continue
bondnum += 1
modline = ' ' + ' '.join([str(bondnum), newfirst, newsecond,
line.split()[3]]) + '\n'
llist = modline.split()
newmol2.write(" %5s%5s%5s %-5s\n"%(llist[0],llist[1],llist[2],llist[3]))
while oldFilePos != newFilePos:
line = oldmol2.readline()
oldFilePos = newFilePos
newFilePos = oldmol2.tell()
if oldFilePos == newFilePos: break
newmol2.write(line)
oldmol2.close()
newmol2.flush()
newmol2.close()
# Open it right back up -- fix the header
oldmol2 = open(tempmol2filename)
oldFilePos = 'New'
newFilePos = oldmol2.tell()
newmol2 = open(newmol2filename, 'w')
while oldFilePos != newFilePos:
line = oldmol2.readline()
oldFilePos = newFilePos
newFilePos = oldmol2.tell()
# common header lines:
# @<TRIPOS>MOLECULE
# 10113978a
# 55 58 1
# SMALL
# USER_CHARGES
if line.find('<TRIPOS>MOLECULE') != -1:
newmol2.write(line)
newmol2.write(oldmol2.readline())
line = oldmol2.readline()
try:
tp1,tp2,tp3 = line.split()[2:5]
tp1 = int(tp1)
tp2 = int(tp2)
tp3 = int(tp3)
except (ValueError, IndexError):
tp1,tp2,tp3 = 1,0,0
newmol2.write('%5d%6d%6d%6d%6d\n'%(atomnum,bondnum,tp1,tp2,tp3,))
break
newmol2.write(line)
while oldFilePos != newFilePos:
line = oldmol2.readline()
oldFilePos = newFilePos
newFilePos = oldmol2.tell()
newmol2.write(line)
oldmol2.close()
newmol2.flush()
newmol2.close()
os.unlink(tempmol2filename)
def splitpdbqt(pdbqt):
f = open(pdbqt,'r')
fout = open(pdbqt + '-new','w')
f.next()
for line in f:
if line.startswith('ENDMDL'):
break
fout.write(line)
f.close()
fout.close()
return pdbqt + '-new'
if __name__ == '__main__':
import os,sys
if len(sys.argv) != 4:
print>>sys.stderr, __doc__
raise SystemExit
oldPDBQT = sys.argv[1]
oldMOL2 = sys.argv[2]
newMOL2 = sys.argv[3]
oldPDBQT = splitpdbqt(oldPDBQT)
a = pdbqt2mol2(oldPDBQT,oldMOL2,newMOL2)
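# Example invocation (hypothetical file names). The input PDBQT is expected to
# be AutoDock Vina output: the first MODEL is extracted, and its coordinates
# and score are written into a copy of the original mol2:
#   ./pdbqt.py ligand_vina_out.pdbqt ligand.mol2 ligand_docked.mol2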
|
|
"""An implementation of a neural network without classes (just a module)
"""
import numpy
import scipy.optimize
import itertools
def create_training_dict(X, y):
"""Take a set of input features and their labels and package them
along with some useful quantities into a dictionary. This could
be a training, validation, or test set.
Args:
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
y (numpy.ndarray): labels for each feature vector
Returns:
A dictionary containing ...
Xnorm (numpy.ndarray): 2-D array of normalized feature vectors (1 per row)
Xmean (float): mean of X used for normalization
Xstd (float): standard deviation of X used for normalization
y (numpy.ndarray): labels for each feature vector
m (int): number of feature vectors (i.e. training examples)
n (int): number of features per vector
n_cat (int): number of categories (i.e. unique values in y)
y1hot (numpy.ndarray): 2-D array of one-hot vectors (1 per row),
for example if n_cat = 5, the label 3 -> [0, 0, 0, 1, 0]
"""
m, n = X.shape
n_cat = len(numpy.unique(y))
y1hot = numpy.identity(n_cat)[y]
Xmean = X.mean()
Xstd = X.std()
Xnorm = (X - Xmean) / Xstd
return {'Xnorm': Xnorm, 'Xmean': Xmean, 'Xstd': Xstd, 'y': y, 'm': m,
'n': n, 'n_cat': n_cat, 'y1hot': y1hot}
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2,s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def sigmoid(z):
"""Return element-wise sigmoid
Args:
z (numpy.ndarray): argument for sigmoid function
Returns:
g (numpy.ndarray): sigmoid function evaluated element-wise
"""
return 1.0 / (1.0 + numpy.exp(-z))
def sigmoid_gradient(z):
"""Return element-wise sigmoid gradient evaluated at z
Args:
z (numpy.ndarray): argument for sigmoid function
Returns:
g (numpy.ndarray): sigmoid gradient evaluated element-wise
"""
return sigmoid(z) * (1.0 - sigmoid(z))
def flatten_arrays(arrays):
"""Turn a list of 2-D arrays into a single 1-D array.
Args:
arrays (``list`` of numpy.ndarray): a list of 2-D arrays
Returns:
(numpy.ndarray): a flattened 1-D array
"""
return numpy.concatenate([a.flatten() for a in arrays])
def unflatten_array(flat_array, array_shapes):
"""Turn a single 1-D array into a list of 2-D arrays.
Args:
flat_array (numpy.ndarray): a flattened 1-D array
array_shapes (``list`` of ``tuple``): 2-D array shapes
Returns:
arrays (``list`` of numpy.ndarray): a list of 2-D arrays
"""
i = 0
weight_arrays = []
for shape in array_shapes:
j = i + shape[0] * shape[1]
weight_arrays.append(flat_array[i:j].reshape(shape))
i = j
return weight_arrays
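# A minimal round-trip sketch (hypothetical shapes): flattening a list of
# weight arrays and unflattening with the recorded shapes reproduces the
# originals, which is what lets scipy.optimize work on a single 1-D vector.
#   ws = [numpy.ones((3, 4)), numpy.ones((2, 3))]
#   shapes = [w.shape for w in ws]
#   ws2 = unflatten_array(flatten_arrays(ws), shapes)
#   assert all((a == b).all() for a, b in zip(ws, ws2))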
def initialize_random_weights(layer_sizes):
"""Initialize weight arrays to random values. We use the normalized
initialization of Glorot and Bengio (2010).
https://scholar.google.com/scholar?cluster=17889055433985220047&hl=en&as_sdt=0,22
"""
weights = []
for si, sj in pairwise(layer_sizes):
b = numpy.sqrt(6.0 / (si + sj))
weights.append(
numpy.random.uniform(low=-b, high=b, size=(sj, si+1))
)
return weights
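# For example (hypothetical layer pair), consecutive layer sizes si=4, sj=5
# give a bound b = sqrt(6 / 9) ~= 0.816 and a weight array of shape (5, 5):
# one row per unit in the next layer, one column per input plus the bias.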
def minimize(initial_weights, X, y1hot, lam=0.0, method='TNC', jac=True,
tol=1.0e-3, options={'disp': True, 'maxiter': 2000}):
"""Calculate values of weights that minimize the cost function.
Args:
initial_weights (``list`` of numpy.ndarray): weights between each layer
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
y1hot (numpy.ndarray): 2-D array of one-hot vectors (1 per row)
lam (``float``): regularization parameter
method (``str``): minimization method (see scipy.optimize.minimize docs)
jac (``bool`` or ``callable``): gradient provided? (see
scipy.optimize.minimize docs)
tol (``float``): stopping criterion (see scipy.optimize.minimize docs)
options (``dict``): method specific (see scipy.optimize.minimize docs)
Returns:
res (``OptimizeResult``): (see scipy.optimize.minimize docs)
"""
weight_shapes = [w.shape for w in initial_weights]
flat_weights = flatten_arrays(initial_weights)
res = scipy.optimize.minimize(
compute_cost_and_grad,
flat_weights,
args=(X, y1hot, weight_shapes, lam),
method=method,
jac=jac,
tol=tol,
options=options,
)
return res
def compute_cost_and_grad(
weights_flat, X, y1hot, weight_shapes, lam=0.0, cost_only=False):
"""Calculate cost function and its gradient with respect to weights.
Args:
weights_flat (numpy.ndarray): a flattened 1-D weight array
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
y1hot (numpy.ndarray) 2-D array of one-hot vectors (1 per row)
weight_shapes (``list`` of ``tuple``): 2-D array shapes
lam (``float``): regularization parameter
cost_only (``boolean``): if True return cost without gradient
Returns:
J (``float``): Cost with current weights
weights_grad_flat (numpy.ndarray): d_J/d_weight
"""
# package flat weights into a list of arrays
m = X.shape[0]
weights = unflatten_array(weights_flat, weight_shapes)
# feed forward
aa, zz = feed_forward(X, weights)
# calculate raw cost
h = aa[-1]
J = -(
numpy.sum(y1hot * numpy.log(h)) +
numpy.sum((1.0 - y1hot) * numpy.log(1.0 - h))
) / m
# add regularization
for weight in weights:
J += lam * numpy.sum(weight[:, 1:] * weight[:, 1:]) * 0.5 / m
if cost_only:
return J
# gradient - back prop
weights_grad_flat = flatten_arrays(
back_propogation(weights, aa, zz, y1hot, lam=lam))
return J, weights_grad_flat
def feed_forward(X, weights):
"""Perform a feed forward step. Note that the z variables will
not have the bias columns included and that all but the final a
variables will have the bias column included.
Args:
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
weights (``list`` of numpy.ndarray): weights between each layer
Returns:
aa (``list`` of numpy.ndarray): activation of nodes for
each layer. The last item in the list is the hypothesis.
zz (``list`` of numpy.ndarray): input into nodes for each layer.
"""
aa = []
zz = []
zz.append(None) # this is z1 (i.e. there is no z1)
ai = X.copy()
ai = numpy.c_[numpy.ones(ai.shape[0]), ai] # a1 is X + bias nodes
aa.append(ai)
for weight in weights:
zi = ai.dot(weight.T)
zz.append(zi)
ai = sigmoid(zi)
ai = numpy.c_[numpy.ones(ai.shape[0]), ai] # add bias column
aa.append(ai)
# remove bias column from last aa layer
aa[-1] = aa[-1][:, 1:]
return aa, zz
def back_propogation(weights, aa, zz, y1hot, lam=0.0):
"""Perform a back propogation step
Args:
weights (``list`` of numpy.ndarray): weights between each layer
aa (``list`` of numpy.ndarray): activation of nodes for
each layer. The last item in the list is the hypothesis.
zz (``list`` of numpy.ndarray): input into nodes for each layer.
y1hot (numpy.ndarray) 2-D array of one-hot vectors (1 per row)
lam (``float``): regularization parameter
Returns:
weights_grad (``list`` of numpy.ndarray): d_J/d_weight
"""
weights_grad = []
m = y1hot.shape[0]
n_layers = len(weights) + 1
di_plus_1 = aa[-1] - y1hot
i = n_layers - 2
while i > 0:
ones_col = numpy.ones(zz[i].shape[0])
di = (
di_plus_1.dot(weights[i]) *
sigmoid_gradient(numpy.c_[ones_col, zz[i]])
)
di = di[:, 1:]
weights_grad.append(di_plus_1.T.dot(aa[i]))
i -= 1
di_plus_1 = di.copy()
weights_grad.append(di.T.dot(aa[0]))
# we built it backwards
weights_grad.reverse()
# normalize by m
weights_grad = [wg/m for wg in weights_grad]
# add regularization (skip first columns)
for i in range(n_layers-1):
weights_grad[i][:, 1:] += lam/m * weights[i][:, 1:]
return weights_grad
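# A minimal end-to-end sketch on a tiny random dataset (hypothetical layer
# sizes and data; only the helpers defined above are used).
if __name__ == '__main__':
    numpy.random.seed(0)
    X = numpy.random.rand(50, 4)
    y = numpy.random.randint(0, 3, size=50)
    data = create_training_dict(X, y)
    layer_sizes = [data['n'], 5, data['n_cat']]
    weights0 = initialize_random_weights(layer_sizes)
    res = minimize(weights0, data['Xnorm'], data['y1hot'], lam=0.1,
                   options={'disp': False, 'maxiter': 200})
    # unpack the optimized flat weight vector and check training accuracy
    weights = unflatten_array(res.x, [w.shape for w in weights0])
    aa, zz = feed_forward(data['Xnorm'], weights)
    preds = numpy.argmax(aa[-1], axis=1)
    print 'training accuracy:', numpy.mean(preds == y)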
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script translates reviews on ReviewBoard into commits
on the current branch.
"""
import argparse
import atexit
import json
import linecache
import os
import platform
import re
import ssl
import subprocess
import sys
import urllib2
REVIEWBOARD_REVIEW_URL = 'https://reviews.apache.org/r'
REVIEWBOARD_API_URL =\
'https://reviews.apache.org/api/review-requests'
REVIEWBOARD_USER_URL = 'https://reviews.apache.org/api/users'
GITHUB_URL = 'https://api.github.com/repos/apache/mesos/pulls'
GITHUB_PATCH_URL =\
'https://patch-diff.githubusercontent.com/raw/apache/mesos/pull'
def review_api_url(review_id):
"""Returns a Review Board API URL given a review ID."""
# Reviewboard REST API expects '/' at the end of the URL.
return '{base}/{review}/'.format(
base=REVIEWBOARD_API_URL,
review=review_id)
def review_url(review_id):
"""Returns a Review Board UI URL given a review ID."""
return '{base}/{review}/'.format(
base=REVIEWBOARD_REVIEW_URL,
review=review_id)
def pull_request_url(pull_request_number):
"""Returns a GitHub pull request URL given a PR number."""
return '{base}/{pr}'.format(
base=GITHUB_URL,
pr=pull_request_number)
def reviewboard_user_url(username):
"""Returns a Review Board URL for a user given a username."""
# Reviewboard REST API expects '/' at the end of the URL.
return '{base}/{user}/'.format(
base=REVIEWBOARD_USER_URL,
user=username)
def patch_url(options):
"""Returns a Review Board or a GitHub URL for a patch."""
if options['review_id']:
# Reviewboard REST API expects '/' at the end of the URL.
return '{base}/{review}/diff/raw/'.format(
base=REVIEWBOARD_REVIEW_URL,
review=options['review_id'])
elif options['github']:
return '{base}/{patch}.patch'.format(
base=GITHUB_PATCH_URL,
patch=options['github'])
return None
def url_to_json(url):
"""Performs HTTP request and returns JSON-ified response."""
json_str = urllib2.urlopen(url)
return json.loads(json_str.read())
def extract_review_id(url):
"""Extracts review ID from Review Board URL."""
review_id = re.search(REVIEWBOARD_API_URL + r'/(\d+)/', url)
if review_id:
return review_id.group(1)
def review_chain(review_id):
"""Returns a parent review chain for a given review ID."""
json_obj = url_to_json(review_api_url(review_id))
# Stop as soon as we stumble upon a submitted request.
status = json_obj.get('review_request').get('status')
if status == "submitted":
return []
# Verify that the review has exactly one parent.
parent = json_obj.get('review_request').get('depends_on')
if len(parent) > 1:
sys.stderr.write('Error: Review {review} has more than'
' one parent'.format(review=review_id))
sys.exit(1)
elif len(parent) == 0:
return [(review_id, json_obj.get('review_request').get('summary'))]
else:
# The review has exactly one parent.
review_list = review_chain(extract_review_id(parent[0].get('href')))
review = (review_id, json_obj.get('review_request').get('summary'))
if review not in review_list:
return review_list + [review]
else:
sys.stderr.write('Found a circular dependency in the chain starting'
' at {review}\n'.format(review=review_id))
sys.exit(1)
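# A hypothetical illustration of the recursion above: if review 102 depends on
# 101, which in turn depends on 100, and none of them are submitted, then
# review_chain('102') returns
#   [('100', <summary>), ('101', <summary>), ('102', <summary>)]
# i.e. oldest ancestor first. A submitted ancestor terminates the chain early.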
def shell(command, dry_run):
"""
Runs a command in a shell, unless the dry-run option
is set (in which case it just prints the command).
"""
if dry_run:
print command
return
error_code = subprocess.call(command, stderr=subprocess.STDOUT, shell=True)
if error_code != 0:
sys.exit(error_code)
def apply_review(options):
"""Applies a review with a given ID locally."""
# Make sure we don't leave the patch behind in case of failure.
# We store the patch ID in a local variable to ensure the lambda
# captures the current patch ID.
patch_file = '%s.patch' % patch_id(options)
atexit.register(
lambda: os.path.exists(patch_file) and os.remove(patch_file))
fetch_patch(options)
apply_patch(options)
commit_patch(options)
def ssl_create_default_context():
"""
Equivalent to `ssl.create_default_context` with default arguments and
certificate/hostname verification disabled.
See: https://github.com/python/cpython/blob/2.7/Lib/ssl.py#L410
"""
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
# SSLv2 considered harmful.
context.options |= ssl.OP_NO_SSLv2
# SSLv3 has problematic security and is only required for really old
# clients such as IE6 on Windows XP.
context.options |= ssl.OP_NO_SSLv3
# Disable compression to prevent CRIME attacks (OpenSSL 1.0+).
context.options |= getattr(ssl, "OP_NO_COMPRESSION", 0)
# Disable certificate and hostname verification.
context.verify_mode = ssl.CERT_NONE
context.check_hostname = False
return context
def fetch_patch(options):
"""Fetches a patch from Review Board or GitHub."""
if platform.system() == 'Windows':
response = urllib2.urlopen(
patch_url(options),
context=ssl_create_default_context())
with open('%s.patch' % patch_id(options), 'wb') as patch:
patch.write(response.read())
else:
# NOTE: SSL contexts are only supported in Python 2.7.9+. The version
# of Python running on the non-Windows ASF CI machines is sometimes
# older. Hence, we fall back to `wget` on non-Windows machines.
cmd = ' '.join([
'wget',
'--no-check-certificate',
'--no-verbose',
'-O '
'{review_id}.patch',
'{url}']).format(
review_id=patch_id(options),
url=patch_url(options))
# In case of github we always need to fetch the patch to extract username
# and email, so we ignore the dry_run option by setting the second parameter
# to False.
if options['github']:
shell(cmd, False)
else:
shell(cmd, options['dry_run'])
def patch_id(options):
"""Returns the review ID or the GitHub pull request number."""
return options['review_id'] or options['github']
def apply_patch(options):
"""Applies patch locally."""
cmd = 'git apply --index {review_id}.patch'.format(
review_id=patch_id(options))
if options['3way']:
cmd += ' --3way'
if platform.system() == 'Windows':
# NOTE: Depending on the Git settings, there may or may not be
# carriage returns in files and in the downloaded patch.
# We ignore these errors on Windows.
cmd += ' --ignore-whitespace'
shell(cmd, options['dry_run'])
def quote(string):
"""Quote a variable so it can be safely used in shell."""
return string.replace("'", "'\\''")
def commit_patch(options):
"""Commits patch locally."""
data = patch_data(options)
# Check whether we need to amend the commit message.
if options['no_amend']:
amend = ''
else:
amend = '-e'
# NOTE: Windows does not support multi-line commit messages via the shell.
message_file = '%s.message' % patch_id(options)
atexit.register(
lambda: os.path.exists(message_file) and os.remove(message_file))
with open(message_file, 'w') as message:
message.write(data['message'])
cmd = u'git commit --author \"{author}\" {_amend} -aF \"{message}\"'.format(
author=quote(data['author']),
_amend=amend,
message=message_file)
shell(cmd, options['dry_run'])
def patch_data(options):
"""
Populates and returns a dictionary with data necessary for
committing the patch (such as the message, the author, etc.).
"""
if options['review_id']:
return reviewboard_data(options)
elif options['github']:
return github_data(options)
else:
return None
def get_author(patch):
"""Reads the author name and email from the .patch file"""
author = linecache.getline(patch, 2)
return author.replace('From: ', '').rstrip()
def github_data(options):
"""Fetches pull request data and populates internal data structure."""
pull_request_number = options['github']
pull_request = url_to_json(pull_request_url(pull_request_number))
title = pull_request.get('title')
description = pull_request.get('body')
url = '{url}/{pr}'.format(url=GITHUB_URL, pr=pull_request_number)
author = get_author('{pr}.patch'.format(pr=pull_request_number))
message = '\n\n'.join([
title,
description,
'This closes #{pr}'.format(pr=pull_request_number)])
review_data = {
"summary": title,
"description": description,
"url": url,
"author": author,
"message": message
}
return review_data
def reviewboard_data(options):
"""Fetches review data and populates internal data structure."""
review_id = options['review_id']
# Populate review object.
review = url_to_json(review_api_url(review_id)).get('review_request')
url = review_url(review_id)
# Populate user object.
user = url_to_json(reviewboard_user_url(
review.get('links').get('submitter').get('title'))).get('user')
author = u'{author} <{email}>'.format(
author=user.get('fullname'),
email=user.get('email'))
message = '\n\n'.join([
review.get('summary'),
review.get('description'),
'Review: {review_url}'.format(review_url=url)])
review_data = {
"summary": review.get('summary'),
"description": review.get('description'),
"url": url,
"author": author,
"message": message
}
return review_data
def parse_options():
"""Parses command line options and returns an option dictionary."""
options = {}
parser = argparse.ArgumentParser(
description='Recursively apply Review Board reviews'
' and GitHub pull requests.')
parser.add_argument('-d', '--dry-run',
action='store_true',
help='Perform a dry run.')
parser.add_argument('-n', '--no-amend',
action='store_true',
help='Do not amend commit message.')
parser.add_argument('-c', '--chain',
action='store_true',
help='Recursively apply parent review chain.')
parser.add_argument('-3', '--3way',
dest='three_way',
action='store_true',
help='Use 3 way merge in git apply.')
# Add -g and -r and make them mutually exclusive.
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-g', '--github',
metavar='PULL_REQUEST',
help='Pull request number')
group.add_argument('-r', '--review-id',
metavar='REVIEW_ID',
help='Numeric review ID')
args = parser.parse_args()
options['review_id'] = args.review_id
options['dry_run'] = args.dry_run
options['no_amend'] = args.no_amend
options['github'] = args.github
options['chain'] = args.chain
options['3way'] = args.three_way
return options
def reviewboard(options):
"""Applies either a chain of reviewboard patches or a single patch."""
if options['chain']:
# Retrieve the list of reviews to apply.
applied = set()
for review_id, _ in review_chain(options['review_id']):
if review_id not in applied:
applied.add(review_id)
options['review_id'] = review_id
apply_review(options)
else:
apply_review(options)
def main():
"""
Main function to apply reviews.
"""
options = parse_options()
if options['review_id']:
reviewboard(options)
else:
apply_review(options)
if __name__ == "__main__":
main()
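# Example invocations (hypothetical script name and IDs):
#   ./apply-reviews.py -r 51234             # apply a single Review Board review
#   ./apply-reviews.py -r 51234 --chain     # also apply its un-submitted parents first
#   ./apply-reviews.py -g 297               # apply a GitHub pull request
#   ./apply-reviews.py -r 51234 --dry-run   # only print the commands that would run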
|
|
import humanize
import pprint
import contextlib
import inspect
import gevent
import re
from peewee import fn
from datetime import datetime, timedelta
from disco.api.http import Routes, APIException
from disco.types.message import MessageTable, MessageEmbed, MessageEmbedField, MessageEmbedThumbnail
from disco.types.user import User as DiscoUser
from disco.types.user import GameType, Status, Game
from disco.util.snowflake import to_datetime, to_snowflake
from disco.util.sanitize import S
from rowboat.plugins import BasePlugin as Plugin, CommandFail, CommandSuccess
from rowboat.redis import rdb
from rowboat.models.guild import Guild, GuildVoiceSession, GuildEmoji
from rowboat.models.user import User, Infraction
from rowboat.models.message import Message, Reaction
from rowboat.util.gevent import wait_many
from rowboat.util.stats import statsd, to_tags
from rowboat.util.images import get_dominant_colors_user, get_dominant_colors_guild
from rowboat.constants import (
GREEN_TICK_EMOJI, RED_TICK_EMOJI, ROWBOAT_GUILD_ID, ROWBOAT_USER_ROLE_ID,
ROWBOAT_CONTROL_CHANNEL, ROWBOAT_NAME, ROWBOAT_INFO, DOMAIN,
GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID
)
from rowboat.plugins.infractions import clamp
from rowboat.plugins.utilities import (
INFO_TIMESTAMP_MSG_SQL, INFO_INFRACTION_SQL, INFO_NOTE_SQL,
get_status_emoji
)
PY_CODE_BLOCK = u'```py\n{}\n```'
info_args = {'name': ROWBOAT_NAME, 'info': ROWBOAT_INFO}
BOT_INFO = '''
{name} {info}
'''.format(**info_args)
GUILDS_WAITING_SETUP_KEY = 'gws'
INFO_INFRACTION_STATS_SQL = """
WITH summary AS (
SELECT i.guild_id, i.type,
ROW_NUMBER() OVER(
) as rw
FROM infractions i
WHERE user_id = {author}
)
SELECT g.name, count(s.type), s.type
FROM summary s
INNER JOIN
guilds g ON
g.guild_id = s.guild_id
GROUP BY g.name, s.type
ORDER BY g.name, count DESC;
"""
STATUS_MSG = """
You forgot to define a type..
default
streaming
listening
watching
"""
class Object():
pass
class GlobalPlugin(Plugin):
global_plugin = True
def load(self, ctx):
super(GlobalPlugin, self).load(ctx)
core = self.bot.plugins['CorePlugin']
self.startup = core.startup
self.guilds = core.guilds
@Plugin.command('force setup', '<guild:snowflake>', level=-1)
def command_force_setup(self, event, guild):
e_guild = self.state.guilds.get(guild)
# Make sure we have admin perms
m = e_guild.members.select_one(id=self.state.me.id)
if not m.permissions.administrator:
return event.msg.reply(':warning: bot must have the Administrator permissions')
guild = Guild.setup(e_guild)
rdb.srem(GUILDS_WAITING_SETUP_KEY, str(e_guild.id))
self.guilds[e_guild.id] = guild
event.msg.reply(':ok_hand: successfully loaded configuration')
@Plugin.command('setup')
def command_setup(self, event):
if not event.guild:
return event.msg.reply(':warning: this command can only be used in servers')
# Make sure we're not already setup
if event.guild.id in self.guilds:
return event.msg.reply(':warning: this server is already setup')
global_admin = rdb.sismember('global_admins', event.author.id)
# Override stuff :watchinu:
is_control_guild = False
global_bypass = False # We will basically use global_bypass in place of global_admin
if event.guild.id == ROWBOAT_GUILD_ID:
is_control_guild = True
# If we are in control guild and are global admin then we don't care about overrides
if global_admin and is_control_guild:
global_bypass = True
elif global_admin:
# We need to check whether an override is in place for us
override_key_base = 'global_admins:override:{}:'.format(event.author.id)
canoverride = rdb.get('{}{}'.format(override_key_base, 'ANY'))
if canoverride is not None:
global_bypass = True
else:
canoverride = rdb.get('{}{}'.format(override_key_base, event.guild.id))
if canoverride is not None:
global_bypass = True
# Make sure this is the owner of the server
if not global_bypass:
if not event.guild.owner_id == event.author.id:
return event.msg.reply(':warning: only the server owner can setup rowboat')
# Make sure we have admin perms
m = event.guild.members.select_one(id=self.state.me.id)
if not m.permissions.administrator and not global_bypass:
return event.msg.reply(':warning: bot must have the Administrator permissions')
guild = Guild.setup(event.guild)
rdb.srem(GUILDS_WAITING_SETUP_KEY, str(event.guild.id))
self.guilds[event.guild.id] = guild
event.msg.reply(':ok_hand: successfully loaded configuration')
@Plugin.command('about')
def command_about(self, event):
embed = MessageEmbed()
embed.set_author(name=ROWBOAT_NAME, icon_url=self.client.state.me.avatar_url, url=DOMAIN)
embed.description = BOT_INFO
embed.add_field(name='Servers', value=str(Guild.select().count()), inline=True)
embed.add_field(name='Uptime', value=humanize.naturaldelta(datetime.utcnow() - self.startup), inline=True)
event.msg.reply(embed=embed)
@Plugin.command('uptime', level=-1)
def command_uptime(self, event):
event.msg.reply('{} was started {}'.format(ROWBOAT_NAME,
humanize.naturaldelta(datetime.utcnow() - self.startup)
))
@Plugin.command('source', '<command>', level=-1)
def command_source(self, event, command=None):
for cmd in self.bot.commands:
if command.lower() in cmd.triggers:
break
else:
event.msg.reply(u"Couldn't find command for `{}`".format(S(command, escape_codeblocks=True)))
return
code = cmd.func.__code__
lines, firstlineno = inspect.getsourcelines(code)
event.msg.reply('<https://github.com/tobitenno/rowboat/blob/master/{}#L{}-{}>'.format(
code.co_filename,
firstlineno,
firstlineno + len(lines)
))
@Plugin.command('eval', level=-1)
def command_eval(self, event):
ctx = {
'bot': self.bot,
'client': self.bot.client,
'state': self.bot.client.state,
'event': event,
'msg': event.msg,
'guild': event.msg.guild,
'channel': event.msg.channel,
'author': event.msg.author
}
# Multiline eval
src = event.codeblock
if src.count('\n'):
lines = filter(bool, src.split('\n'))
if lines[-1] and 'return' not in lines[-1]:
lines[-1] = 'return ' + lines[-1]
lines = '\n'.join(' ' + i for i in lines)
code = 'def f():\n{}\nx = f()'.format(lines)
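# e.g. a hypothetical two-line codeblock "a = 1" / "a + 1" is wrapped
# (with single-space indents) as:
#   def f():
#    a = 1
#    return a + 1
#   x = f()
# so the value of the last expression ends up in local['x'] below.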
local = {}
try:
exec compile(code, '<eval>', 'exec') in ctx, local
except Exception as e:
event.msg.reply(PY_CODE_BLOCK.format(type(e).__name__ + ': ' + str(e)))
return
result = pprint.pformat(local['x'])
else:
try:
result = str(eval(src, ctx))
except Exception as e:
event.msg.reply(PY_CODE_BLOCK.format(type(e).__name__ + ': ' + str(e)))
return
if len(result) > 1990:
event.msg.reply('', attachments=[('result.txt', result)])
else:
event.msg.reply(PY_CODE_BLOCK.format(result))
@Plugin.command('sync-bans', group='control', level=-1)
def control_sync_bans(self, event):
guilds = list(Guild.select().where(
Guild.enabled == 1
))
msg = event.msg.reply(':timer: please wait while I sync...')
for guild in guilds:
guild.sync_bans(self.client.state.guilds.get(guild.guild_id))
msg.edit('<:{}> synced {} guilds'.format(GREEN_TICK_EMOJI, len(guilds)))
@Plugin.command('reconnect', group='control', level=-1)
def control_reconnect(self, event):
event.msg.reply('Ok, closing connection')
self.client.gw.ws.close()
@Plugin.command('invite', '<guild:snowflake>', group='guilds', level=-1)
def guild_join(self, event, guild):
guild = self.state.guilds.get(guild)
if not guild:
return event.msg.reply(':no_entry_sign: invalid or unknown guild ID')
msg = event.msg.reply(u'Ok, hold on while I get you setup with an invite link to {}'.format(
guild.name,
))
general_channel = guild.channels.itervalues().next()
try:
invite = general_channel.create_invite(
max_age=300,
max_uses=1,
unique=True,
)
except:
return msg.edit(u':no_entry_sign: Hmmm, something went wrong creating an invite for {}'.format(
guild.name,
))
msg.edit(u'Ok, here is a temporary invite for you: {}'.format(
invite.code,
))
@Plugin.command('wh', '<guild:snowflake>', group='guilds', level=-1)
def guild_whitelist(self, event, guild):
rdb.sadd(GUILDS_WAITING_SETUP_KEY, str(guild))
event.msg.reply('Ok, guild {} is now in the whitelist'.format(guild))
@Plugin.command('unwh', '<guild:snowflake>', group='guilds', level=-1)
def guild_unwhitelist(self, event, guild):
rdb.srem(GUILDS_WAITING_SETUP_KEY, str(guild))
event.msg.reply('Ok, I\'ve made sure guild {} is no longer in the whitelist'.format(guild))
@Plugin.command('disable', '<plugin:str>', group='plugins', level=-1)
def plugin_disable(self, event, plugin):
plugin = self.bot.plugins.get(plugin)
if not plugin:
return event.msg.reply('Hmmm, it appears that plugin doesn\'t exist!?')
self.bot.rmv_plugin(plugin.__class__)
event.msg.reply('Ok, that plugin has been disabled and unloaded')
@Plugin.command('enable', '<plugin:str>', group='plugins', level=-1)
def plugin_enable(self, event, plugin):
p = self.bot.plugins.get(plugin)
if p:
raise CommandFail('{} is already loaded'.format(plugin))
try:
self.bot.add_plugin_module('rowboat.plugins.{}'.format(plugin))
except:
raise CommandFail('An error occurred loading the plugin')
# For some unknown reason I cannot get this to reply with success on loading
@Plugin.command('list', aliases=['ls'], group='plugins', level=-1)
def plugin_list(self, event):
embed = MessageEmbed()
embed.set_author(name=u'Loaded Plugins')
embed.description = '\n'.join(u'{}'.format(key) for key in self.bot.plugins)
event.msg.reply('', embed=embed)
# Thank you to Xenthys for the base of this plugin
@Plugin.command('override', '[duration:str] [inguild:str...]', level=-1)
def admin_override(self, event, duration=3600, inguild=None):
if isinstance(duration, int) or duration.isnumeric():
duration = int(duration)
elif duration.lower() == 'rm':
items = rdb.keys('global_admins:override:{}:*'.format(event.author.id))
for i in items:
rdb.delete(i)
if len(items) > 0:
return event.msg.reply('Removed {} overrides'.format(len(items))).after(5).delete()
else:
return event.msg.reply('No overrides in place').after(5).delete()
elif duration.lower() == 'ls':
items = rdb.keys('global_admins:override:{}:*'.format(event.author.id))
if len(items) == 0:
return event.msg.reply('No overrides in place').after(5).delete()
striplen = len('global_admins:override:{}:'.format(event.author.id))
msg = ''
c = 0
for c, i in enumerate(items, 1):
if i[striplen:] != 'ANY':
guild_name = Guild.select(Guild.name).where(
Guild.guild_id == i[striplen:]
).get().name
msg += '{} - {}\n'.format(c, guild_name)
else:
msg += '{} - ANY\n'.format(c)
return event.msg.reply(msg).after(5).delete()
else:
return event.msg.reply('Invalid override option').after(5).delete()
override_guild = None
if inguild is None:
# Set to any
override_guild = 'ANY'
else:
if not inguild.lower().startswith('in'):
return event.msg.reply('Please specify a valid guild').after(5).delete()
pre_override_guild = inguild[3:].lower()
if pre_override_guild.isnumeric():
if not int(pre_override_guild) in self.guilds:
pass
else:
override_guild = pre_override_guild
elif pre_override_guild == 'any' or pre_override_guild == 'all':
# Bypass ALL guilds
override_guild = 'ANY'
if override_guild is None:
# It is probably a name so we have to search for it
guild = Guild.select(Guild.guild_id).where(
Guild.name ** '%{}%'.format(S(str(inguild[3:])))
).tuples()
if len(guild) == 0:
return event.msg.reply('`{}` is not a valid guild'.format(S(pre_override_guild))).after(5).delete()
elif len(guild) > 1:
return event.msg.reply('Too many guilds found. Please try again.').after(5).delete()
for i in guild:
override_guild = i[0]
guild_name = None
if override_guild != 'ANY':
guild_name = Guild.select(Guild.name).where(
Guild.guild_id == override_guild
).get()
override_key = 'global_admins:override:{}:{}'.format(event.author.id, override_guild)
since, ttl = rdb.get(override_key), rdb.ttl(override_key)
now = datetime.utcnow()
since = datetime.strptime(since, '%Y-%m-%d %H:%M:%S.%f') if since else now
diff = now - since
if duration is None:
if not ttl:
return event.msg.reply('You are not currently overriding permissions.').after(5).delete()
return event.msg.reply('Your override has been active for {}, and will expire in {} second{}.'.format(
humanize.naturaldelta(diff), ttl, 's' if ttl != 1 else ''
)).after(5).delete()
if duration > 3600:
return event.msg.reply('Override duration cannot exceed one hour.').after(5).delete()
if duration <= 0:
if not ttl:
return event.msg.reply('You are not currently overriding permissions.').after(5).delete()
rdb.delete(override_key)
return event.msg.reply('Your override has been disabled. It has been active for {}.'.format(
humanize.naturaldelta(diff)
)).after(5).delete()
if ttl <= 1: # avoid potential race condition
rdb.set(override_key, datetime.strftime(now, '%Y-%m-%d %H:%M:%S.%f'), ex=duration)
with self.send_control_message() as embed:
embed.title = 'Override Enabled'
embed.color = 0xffb347
embed.add_field(name='Admin', value=unicode(event.author), inline=True)
embed.add_field(name='Admin ID', value=event.author.id, inline=True)
embed.add_field(name='Guild', value=unicode(event.guild.name), inline=True)
embed.add_field(name='Channel', value=unicode(event.channel), inline=True)
embed.add_field(name='Channel ID', value=event.channel.id, inline=True)
embed.add_field(name='Enabled On', value=guild_name.name if hasattr(guild_name, 'name') else 'ANY', inline=True)
embed.add_field(name='Duration', value=duration, inline=True)
return event.msg.reply(':triangular_flag_on_post: Your override has been enabled for {} second{} in guild: `{}`.'.format(
duration, 's' if duration > 1 else '', guild_name.name if hasattr(guild_name, 'name') else 'ANY'
))
rdb.expire(override_key, duration)
event.msg.reply('Your override has already been active for {}, and will now expire in {} second{}.'.format(
humanize.naturaldelta(diff), duration, 's' if duration > 1 else ''
)).after(5).delete()
@Plugin.command('info', '[user:user|snowflake]', group='adv', level=-1)
def adv_info(self, event, user=None):
# Since we are a global admin we can search ANY guild from here
if user is None:
user = event.author
user_id = 0
if isinstance(user, (int, long)):
user_id = user
user = self.state.users.get(user)
if user and not user_id:
user = self.state.users.get(user.id)
if not user:
if user_id:
user = self.fetch_user(user_id)
User.from_disco_user(user)
else:
raise CommandFail('Unknown user')
content = []
content.append(u'**\u276F User Information**')
content.append(u'ID: {}'.format(user.id))
content.append(u'Profile: <@{}>'.format(user.id))
if user.presence:
emoji, status = get_status_emoji(user.presence)
content.append('Status: {} <{}>'.format(status, emoji))
if user.presence.game and user.presence.game.name:
if user.presence.game.type == GameType.DEFAULT:
content.append(u'Game: {}'.format(user.presence.game.name))
elif user.presence.game.name == 'Spotify':
content.append(u'Listening To {}'.format(user.presence.game.name))
else:
content.append(u'Stream: [{}]({})'.format(user.presence.game.name, user.presence.game.url))
created_dt = to_datetime(user.id)
content.append('Created: {} ago ({})'.format(
humanize.naturaldelta(datetime.utcnow() - created_dt),
created_dt.strftime("%d-%b-%y @ %H:%M")
))
if rdb.sismember('global_admins', user.id):
content.append(u'\n**\u276F Heimdallr Staff**')
content.append(u'Global Administrator')
member = event.guild.get_member(user.id) if event.guild else None
if member:
content.append(u'\n**\u276F Member Information**')
if member.nick:
content.append(u'Nickname: {}'.format(S(member.nick, escape_codeblocks=True)))
content.append('Joined: {} ago ({})'.format(
humanize.naturaldelta(datetime.utcnow() - member.joined_at),
member.joined_at.strftime("%d-%b-%y @ %H:%M"),
))
if member.roles:
content.append(u'Roles: {}'.format(' '.join('<@&{}>'.format(r) for r in member.roles)))
# Execute a bunch of queries
# these will obviously only return the current guild, but that's ok
newest_msg_raw = INFO_TIMESTAMP_MSG_SQL.format('DESC', guild=event.guild.id, author=user.id)
newest_msg = list(Message.raw(newest_msg_raw).tuples())
oldest_msg_raw = INFO_TIMESTAMP_MSG_SQL.format('ASC', guild=event.guild.id, author=user.id)
oldest_msg = list(Message.raw(oldest_msg_raw).tuples())
infractions_raw = INFO_INFRACTION_SQL.format(author=user.id)
infractions = list(Infraction.raw(infractions_raw).tuples())
notes_raw = INFO_NOTE_SQL.format(author=user.id)
notes = list(Infraction.raw(notes_raw).tuples())
voice = GuildVoiceSession.select(
GuildVoiceSession.user_id,
fn.COUNT('*'),
fn.SUM(GuildVoiceSession.ended_at - GuildVoiceSession.started_at)
).where(
(GuildVoiceSession.user_id == user.id) &
(~(GuildVoiceSession.ended_at >> None))
).group_by(GuildVoiceSession.user_id).tuples().async()
# Wait for them all to complete (we're still going to be as slow as the
# slowest query, so no need to be smart about this.)
wait_many(voice, timeout=15)
tags = to_tags(guild_id=event.msg.guild.id)
nmsg = None
for i in newest_msg:
nmsg = i[0]
omsg = None
for i in oldest_msg:
omsg = i[0]
if hasattr(nmsg, 'time') and hasattr(omsg, 'time'):
content.append(u'\n **\u276F Activity**')
content.append('Last Message: {} ago ({})'.format(
humanize.naturaldelta(datetime.utcnow() - nmsg),
nmsg.strftime("%d-%b-%y @ %H:%M"),
))
content.append('First Message: {} ago ({})'.format(
humanize.naturaldelta(datetime.utcnow() - omsg),
omsg.strftime("%d-%b-%y @ %H:%M"),
))
if infractions:
infractions = list(infractions)
total = sum(i[1] for i in infractions)
content.append(u'\n**\u276F Infractions**')
content.append('Total Infractions: {}'.format(total))
content.append('Unique Servers: {}'.format(len(infractions)))
if notes:
notes = list(notes)
total = sum(i[1] for i in notes)
content.append(u'\n**\u276F Notes**')
content.append('Total Notes: {}'.format(total))
content.append('Unique Servers: {}'.format(len(notes)))
if voice.value:
statsd.timing('plugin.utilities.info.sql.voice', voice.value._query_time, tags=tags)
voice = list(voice.value)
content.append(u'\n**\u276F Voice**')
content.append(u'Sessions: {}'.format(voice[0][1]))
content.append(u'Time: {}'.format(humanize.naturaldelta(
voice[0][2]
)))
try:
rperms = []
perms = event.guild.get_permissions(member)
if perms.kick_members:
rperms.append('Kick Members')
if perms.ban_members:
rperms.append('Ban Members')
if perms.administrator:
rperms.append('Administrator')
if perms.manage_channels:
rperms.append('Manage Channels')
if perms.manage_guild:
rperms.append('Manage Guild')
if perms.manage_messages:
rperms.append('Manage Messages')
if perms.mention_everyone:
rperms.append('Mention Everyone')
if perms.mute_members:
rperms.append('Mute Members')
if perms.move_members:
rperms.append('Move Members')
if perms.manage_nicknames:
rperms.append('Manage Nicknames')
if perms.manage_roles:
rperms.append('Manage Roles')
if perms.manage_webhooks:
rperms.append('Manage Webhooks')
if perms.manage_emojis:
rperms.append('Manage Emojis')
if len(rperms) > 0:
i = ', '.join(rperms)
content.append(u'\n**\u276F Key Permissions**')
content.append(u'{}'.format(i))
except:
pass
try:
# Find all the guilds the user is in
gname = []
for guild in self.state.guilds:
g = self.state.guilds.get(guild)
if g.get_member(user):
gname.append(g.name)
if len(gname) > 0:
i = ', '.join(gname)
content.append(u'\n**\u276F Known Guilds**')
content.append(u'{}'.format(i))
except:
pass
embed = MessageEmbed()
avatar = u'https://cdn.discordapp.com/avatars/{}/{}.png'.format(user.id, user.avatar)
embed.set_author(name=u'{}#{}'.format(user.username, user.discriminator), icon_url=avatar)
embed.set_thumbnail(url=avatar)
embed.description = '\n'.join(content)
try:
embed.color = get_dominant_colors_user(user, avatar)
except:
pass
event.msg.reply('', embed=embed)
msg = event.msg.reply('Dump Infraction Stats?')
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
infractions_stats_raw = INFO_INFRACTION_STATS_SQL.format(author=user.id)
infractions_stats = list(Infraction.raw(infractions_stats_raw))
tbl = MessageTable()
tbl.set_header('Guild', 'Count', 'Type')
for inf in infractions_stats:
type_ = {i.index: i for i in Infraction.Types.attrs}[inf.type_]
if len(tbl.compile()) > 1700:
event.msg.reply(tbl.compile())
tbl = MessageTable()
tbl.set_header('Guild', 'Count', 'Type')
tbl.add(
unicode(inf.name),
str(inf.count),
str(type_)
)
event.msg.reply(tbl.compile())
msg = event.msg.reply('Dump User Stats?')
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
# Query for the basic aggregate message statistics
message_stats = Message.select(
fn.Count('*'),
fn.Sum(fn.char_length(Message.content)),
fn.Sum(fn.array_length(Message.emojis, 1)),
fn.Sum(fn.array_length(Message.mentions, 1)),
fn.Sum(fn.array_length(Message.attachments, 1)),
).where(
(Message.author_id == user.id)
).tuples().async()
reactions_given = Reaction.select(
fn.Count('*'),
Reaction.emoji_id,
Reaction.emoji_name,
).join(
Message,
on=(Message.id == Reaction.message_id)
).where(
(Reaction.user_id == user.id)
).group_by(
Reaction.emoji_id, Reaction.emoji_name
).order_by(fn.Count('*').desc()).tuples().async()
# Query for most used emoji
emojis = Message.raw('''
SELECT gm.emoji_id, gm.name, count(*)
FROM (
SELECT unnest(emojis) as id
FROM messages
WHERE author_id=%s
) q
JOIN guild_emojis gm ON gm.emoji_id=q.id
GROUP BY 1, 2
ORDER BY 3 DESC
LIMIT 1
''', (user.id, )).tuples().async()
deleted = Message.select(
fn.Count('*')
).where(
(Message.author_id == user.id) &
(Message.deleted == 1)
).tuples().async()
wait_many(message_stats, reactions_given, emojis, deleted, timeout=10)
# If we hit an exception executing the core query, throw an exception
if message_stats.exception:
message_stats.get()
q = message_stats.value[0]
embed = MessageEmbed()
embed.fields.append(
MessageEmbedField(name='Total Messages Sent', value=q[0] or '0', inline=True))
embed.fields.append(
MessageEmbedField(name='Total Characters Sent', value=q[1] or '0', inline=True))
if deleted.value:
embed.fields.append(
MessageEmbedField(name='Total Deleted Messages', value=deleted.value[0][0], inline=True))
embed.fields.append(
MessageEmbedField(name='Total Custom Emojis', value=q[2] or '0', inline=True))
embed.fields.append(
MessageEmbedField(name='Total Mentions', value=q[3] or '0', inline=True))
embed.fields.append(
MessageEmbedField(name='Total Attachments', value=q[4] or '0', inline=True))
if reactions_given.value:
reactions_given = reactions_given.value
embed.fields.append(
MessageEmbedField(name='Total Reactions', value=sum(i[0] for i in reactions_given), inline=True))
emoji = (
reactions_given[0][2]
if not reactions_given[0][1] else
'<:{}:{}>'.format(reactions_given[0][2], reactions_given[0][1])
)
embed.fields.append(
MessageEmbedField(name='Most Used Reaction', value=u'{} (used {} times)'.format(
emoji,
reactions_given[0][0],
), inline=True))
if emojis.value:
emojis = list(emojis.value)
if emojis:
embed.add_field(
name='Most Used Emoji',
value=u'<:{1}:{0}> (`{1}`, used {2} times)'.format(*emojis[0]))
embed.thumbnail = MessageEmbedThumbnail(url=user.avatar_url)
embed.color = get_dominant_colors_user(user)
event.msg.reply('', embed=embed)
@Plugin.command('inf search', parser=True, group='adv', level=-1)
@Plugin.parser.add_argument('query', type=str, nargs='+')
@Plugin.parser.add_argument('-g', '--guild', default=None, type=int)
def adv_infraction_search(self, event, args):
# We don't care if we get notes here since we are a global admin anyway
if args.guild:
q = (Infraction.guild_id == args.guild)
if args.query and isinstance(args.query, list) and isinstance(args.query[0], DiscoUser):
query = args.query[0].id
elif args.query:
query = ' '.join(args.query)
if args.guild:
if query and (isinstance(query, int) or query.isdigit()):
q &= (
(Infraction.id == int(query)) |
(Infraction.user_id == int(query)) |
(Infraction.actor_id == int(query)))
elif query:
q &= (Infraction.reason ** '%{}%'.format(query))
else:
if query and (isinstance(query, int) or query.isdigit()):
q = (
(Infraction.id == int(query)) |
(Infraction.user_id == int(query)) |
(Infraction.actor_id == int(query)))
elif query:
q = (Infraction.reason ** '%{}%'.format(query))
user = User.alias()
actor = User.alias()
infractions = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(q).order_by(Infraction.created_at.desc()).limit(6)
tbl = MessageTable()
tbl.set_header('ID', 'Created', 'Type', 'User', 'Moderator', 'Active', 'Reason')
for inf in infractions:
type_ = {i.index: i for i in Infraction.Types.attrs}[inf.type_]
reason = inf.reason or ''
if len(reason) > 75:
reason = reason[:75] + '...'
if inf.active:
active = 'yes'
if inf.expires_at:
active += ' (expires in {})'.format(humanize.naturaldelta(inf.expires_at - datetime.utcnow()))
else:
active = 'no'
tbl.add(
inf.id,
inf.created_at.strftime("%d-%b-%y @ %H:%M"),
str(type_),
unicode(inf.user),
unicode(inf.actor),
active,
clamp(reason, 128)
)
event.msg.reply(tbl.compile())
@Plugin.command('inf info', '<infraction:int>', group='adv', level=-1)
def adv_infraction_info(self, event, infraction):
# We don't care if we get notes here since we are a global admin anyway
try:
user = User.alias()
actor = User.alias()
infraction = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(
(Infraction.id == infraction)
).get()
except Infraction.DoesNotExist:
raise CommandFail('Cannot find an infraction with ID `{}`'.format(infraction))
type_ = {i.index: i for i in Infraction.Types.attrs}[infraction.type_]
embed = MessageEmbed()
if type_ in (Infraction.Types.MUTE, Infraction.Types.TEMPMUTE, Infraction.Types.TEMPROLE):
embed.color = 0xfdfd96
elif type_ in (Infraction.Types.KICK, Infraction.Types.SOFTBAN):
embed.color = 0xffb347
else:
embed.color = 0xff6961
embed.title = str(type_).title()
embed.set_thumbnail(url=infraction.user.get_avatar_url())
embed.add_field(name='User', value=unicode(infraction.user), inline=True)
embed.add_field(name='Moderator', value=unicode(infraction.actor), inline=True)
embed.add_field(name='Active', value='yes' if infraction.active else 'no', inline=True)
if infraction.active and infraction.expires_at:
embed.add_field(name='Expires', value=humanize.naturaldelta(infraction.expires_at - datetime.utcnow()))
embed.add_field(name='Reason', value=infraction.reason or '_No Reason Given_', inline=False)
embed.timestamp = infraction.created_at.isoformat()
event.msg.reply('', embed=embed)
# Base of this command from Dooley
@Plugin.command('status live', '[action:str]', context={'mode': 'live'}, level=-1)
@Plugin.command('status reset', context={'mode': 'reset'}, level=-1)
@Plugin.command('status', '<action:str> <status:str...>', context={'mode': 'all'}, level=-1)
def status(self, event, action=None, status=None, mode='all'):
try:
event.msg.delete()
except:
pass
if mode == 'live':
if action is None:
raise CommandFail('No twitch channel name defined')
self.client.update_presence(Status.online, Game(name=u'{}'.format(action), type=GameType.STREAMING, url=u'https://twitch.tv/{}'.format(action)))
if mode == 'reset':
self.client.update_presence(Status.online, Game(name=u'All the things', type=GameType.WATCHING))
if mode == 'all':
if action is None:
return event.msg.reply(STATUS_MSG).after(15).delete()
if status is None:
raise CommandFail('No status defined')
self.client.update_presence(Status.online, Game(name=u'{}'.format(status), type=action))
# --------------Coded by Xenthys#0001 for Rawgoat--------------
def fetch_user(self, id, raise_on_error=True):
try:
r = self.bot.client.api.http(Routes.USERS_GET, dict(user=id))
return DiscoUser.create(self.bot.client.api.client,r.json())
except APIException:
if raise_on_error:
raise CommandFail('unknown user')
return
@contextlib.contextmanager
def send_control_message(self):
embed = MessageEmbed()
embed.set_footer(text=ROWBOAT_NAME)
embed.timestamp = datetime.utcnow().isoformat()
embed.color = 0x779ecb
try:
yield embed
self.bot.client.api.channels_messages_create(
ROWBOAT_CONTROL_CHANNEL,
embed=embed
)
except:
self.log.exception('Failed to send control message')
return
# Thanks, Dooley
@Plugin.command('wh-add', '<guild:snowflake> <flag:str>', group='guilds', level=-1)
def add_whitelist(self, event, guild, flag):
flag = Guild.WhitelistFlags.get(flag)
if not flag:
raise CommandFail('invalid flag, must be one of {}'.format(', '.join(map(lambda e : '`{}`'.format(e.value), list(Guild.WhitelistFlags)))))
try:
guild = Guild.get(guild_id=guild)
except Guild.DoesNotExist:
raise CommandFail('no guild exists with that id')
if guild.is_whitelisted(flag):
raise CommandFail('this guild already has this flag')
guild.whitelist.append(int(flag))
guild.save()
guild.emit('GUILD_UPDATE')
event.msg.reply('Ok, added flag `{}` to guild {}'.format(str(flag), guild.guild_id))
@Plugin.command('wh-rmv', '<guild:snowflake> <flag:str>', group='guilds', level=-1)
def rmv_whitelist(self, event, guild, flag):
flag = Guild.WhitelistFlags.get(flag)
if not flag:
raise CommandFail('invalid flag, must be one of {}'.format(', '.join(map(lambda e : '`{}`'.format(e.value), list(Guild.WhitelistFlags)))))
try:
guild = Guild.get(guild_id=guild)
except Guild.DoesNotExist:
raise CommandFail('no guild exists with that id')
if not guild.is_whitelisted(flag):
raise CommandFail('this guild doesn\'t have this flag')
guild.whitelist.remove(int(flag))
guild.save()
guild.emit('GUILD_UPDATE')
event.msg.reply('Ok, removed flag `{}` from guild {}'.format(str(flag), guild.guild_id))
@Plugin.command('frestart', level=-1)
def force_restart(self, event):
try:
event.msg.add_reaction(GREEN_TICK_EMOJI)
except:
pass
import os, signal
os.kill(os.getppid(), signal.SIGUSR1)
|
|
import logging
import datetime
import copy
import os
import aiohttp.web
import ray.new_dashboard.modules.tune.tune_consts \
as tune_consts
import ray.new_dashboard.utils as dashboard_utils
from ray.new_dashboard.utils import async_loop_forever, rest_response
logger = logging.getLogger(__name__)
try:
from ray.tune import Analysis
from tensorboard import program
# The `pip install ray` will not install pandas,
# so `from ray.tune import Analysis` may raise
# `AttributeError: module 'pandas' has no attribute 'core'`
# if the pandas version is incorrect.
except (ImportError, AttributeError) as ex:
logger.warning("tune module is not available: %s", ex)
Analysis = None
routes = dashboard_utils.ClassMethodRouteTable
class TuneController(dashboard_utils.DashboardHeadModule):
def __init__(self, dashboard_head):
"""
This dashboard module is responsible for enabling the Tune tab of
the dashboard. To do so, it periodically scrapes Tune output logs,
transforms them, and serves them up over an API.
"""
super().__init__(dashboard_head)
self._logdir = None
self._trial_records = {}
self._trials_available = False
self._tensor_board_dir = ""
self._enable_tensor_board = False
self._errors = {}
@routes.get("/tune/info")
async def tune_info(self, req) -> aiohttp.web.Response:
stats = self.get_stats()
return rest_response(
success=True, message="Fetched tune info", result=stats)
@routes.get("/tune/availability")
async def get_availability(self, req) -> aiohttp.web.Response:
availability = {
"available": Analysis is not None,
"trials_available": self._trials_available
}
return rest_response(
success=True,
message="Fetched tune availability",
result=availability)
@routes.get("/tune/set_experiment")
async def set_tune_experiment(self, req) -> aiohttp.web.Response:
experiment = req.query["experiment"]
err, experiment = self.set_experiment(experiment)
if err:
return rest_response(success=False, error=err)
return rest_response(
success=True, message="Successfully set experiment", **experiment)
@routes.get("/tune/enable_tensorboard")
async def enable_tensorboard(self, req) -> aiohttp.web.Response:
self._enable_tensorboard()
if not self._tensor_board_dir:
return rest_response(
success=False, message="Error enabling tensorboard")
return rest_response(success=True, message="Enabled tensorboard")
def get_stats(self):
tensor_board_info = {
"tensorboard_current": self._logdir == self._tensor_board_dir,
"tensorboard_enabled": self._tensor_board_dir != ""
}
return {
"trial_records": copy.deepcopy(self._trial_records),
"errors": copy.deepcopy(self._errors),
"tensorboard": tensor_board_info
}
def set_experiment(self, experiment):
if os.path.isdir(os.path.expanduser(experiment)):
self._logdir = os.path.expanduser(experiment)
return None, {"experiment": self._logdir}
else:
return "Not a Valid Directory", None
def _enable_tensorboard(self):
if not self._tensor_board_dir:
tb = program.TensorBoard()
tb.configure(argv=[None, "--logdir", str(self._logdir)])
tb.launch()
self._tensor_board_dir = self._logdir
def collect_errors(self, df):
sub_dirs = os.listdir(self._logdir)
trial_names = filter(
lambda d: os.path.isdir(os.path.join(self._logdir, d)), sub_dirs)
for trial in trial_names:
error_path = os.path.join(self._logdir, trial, "error.txt")
if os.path.isfile(error_path):
self._trials_available = True
with open(error_path) as f:
text = f.read()
self._errors[str(trial)] = {
"text": text,
"job_id": os.path.basename(self._logdir),
"trial_id": "No Trial ID"
}
other_data = df[df["logdir"].str.contains(trial)]
if len(other_data) > 0:
trial_id = str(other_data["trial_id"].values[0])
self._errors[str(trial)]["trial_id"] = trial_id
if trial_id in self._trial_records.keys():
self._trial_records[trial_id]["error"] = text
self._trial_records[trial_id]["status"] = "ERROR"
@async_loop_forever(tune_consts.TUNE_STATS_UPDATE_INTERVAL_SECONDS)
async def collect(self):
"""
Collects and cleans data on the running Tune experiment from the
Tune logs so that users can see this information in the front-end
        client.
"""
self._trial_records = {}
self._errors = {}
if not self._logdir or not Analysis:
return
# search through all the sub_directories in log directory
analysis = Analysis(str(self._logdir))
df = analysis.dataframe(metric=None, mode=None)
if len(df) == 0 or "trial_id" not in df.columns:
return
self._trials_available = True
# make sure that data will convert to JSON without error
df["trial_id_key"] = df["trial_id"].astype(str)
df = df.fillna(0)
trial_ids = df["trial_id"]
for i, value in df["trial_id"].iteritems():
if type(value) != str and type(value) != int:
trial_ids[i] = int(value)
df["trial_id"] = trial_ids
# convert df to python dict
df = df.set_index("trial_id_key")
trial_data = df.to_dict(orient="index")
# clean data and update class attribute
if len(trial_data) > 0:
trial_data = self.clean_trials(trial_data)
self._trial_records.update(trial_data)
self.collect_errors(df)
def clean_trials(self, trial_details):
first_trial = trial_details[list(trial_details.keys())[0]]
config_keys = []
float_keys = []
metric_keys = []
# list of static attributes for trial
default_names = {
"logdir", "time_this_iter_s", "done", "episodes_total",
"training_iteration", "timestamp", "timesteps_total",
"experiment_id", "date", "timestamp", "time_total_s", "pid",
"hostname", "node_ip", "time_since_restore",
"timesteps_since_restore", "iterations_since_restore",
"experiment_tag", "trial_id"
}
# filter attributes into floats, metrics, and config variables
for key, value in first_trial.items():
if isinstance(value, float):
float_keys.append(key)
if str(key).startswith("config/"):
config_keys.append(key)
elif key not in default_names:
metric_keys.append(key)
# clean data into a form that front-end client can handle
for trial, details in trial_details.items():
ts = os.path.getctime(details["logdir"])
formatted_time = datetime.datetime.fromtimestamp(ts).strftime(
"%Y-%m-%d %H:%M:%S")
details["start_time"] = formatted_time
details["params"] = {}
details["metrics"] = {}
# round all floats
for key in float_keys:
details[key] = round(details[key], 12)
# group together config attributes
for key in config_keys:
new_name = key[7:]
details["params"][new_name] = details[key]
details.pop(key)
# group together metric attributes
for key in metric_keys:
details["metrics"][key] = details[key]
details.pop(key)
if details["done"]:
details["status"] = "TERMINATED"
else:
details["status"] = "RUNNING"
details.pop("done")
details["job_id"] = os.path.basename(self._logdir)
details["error"] = "No Error"
return trial_details
async def run(self, server):
# Forever loop the collection process
await self.collect()
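# --- Hedged usage sketch (illustration only, not part of this module) ---
# Assuming a Ray dashboard is reachable at its default address (the host, port
# and experiment path below are assumptions for illustration), the routes
# registered above can be exercised with plain HTTP requests:
#
#   import requests
#   base = "http://127.0.0.1:8265"
#   requests.get(base + "/tune/set_experiment",
#                params={"experiment": "~/ray_results/my_experiment"})
#   print(requests.get(base + "/tune/availability").json())
#   print(requests.get(base + "/tune/info").json())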
|
|
# -*- coding: utf-8 -*-
"""
Tests for methods.
"""
from datetime import datetime
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.http import urlquote
from django.utils.translation import ugettext
from ..models import Method, MethodFile
class MethodTests(TestCase):
fixtures = ['users.json',
'methods.json']
def test_is_published(self):
m = Method.objects.all()[0]
self.failUnlessEqual(m.is_published(), True)
self.failUnlessEqual(m.is_draft(), False)
def test_latest_feed(self):
response = self.client.get(reverse('methods-feed',
kwargs={'url': 'latest'}))
self.failUnlessEqual(response.status_code, 200)
class MethodUnauthorizedTests(TestCase):
fixtures = ['users.json',
'methods.json']
def test_index_unauth(self):
""" Check that the index page renders. """
response = self.client.get(reverse('methods-index'))
self.failUnlessEqual(response.status_code, 200)
def test_method_views_unauth(self):
""" Make sure all the views render. """
methods = Method.objects.exclude(status='DRAFT')
for method in methods:
p_at = method.published_at
response = self.client.get(reverse('methods-show-method',
kwargs={'year': p_at.year,
'month': p_at.month,
'day': p_at.day,
'slug': method.slug}))
self.failUnlessEqual(response.status_code, 200)
def test_create_method_view_unauth(self):
""" Make sure the create method view redirects if not authorized. """
response = self.client.get(reverse('methods-create-method'))
self.assertRedirects(response,
reverse('login') + "?next=" + \
reverse('methods-create-method'),
status_code=302,
target_status_code=200)
def test_edit_method_view_unauth(self):
""" Make sure the edit method view redirects if not authorized. """
m = Method.objects.all()[0]
response = self.client.get(reverse('methods-edit-method',
kwargs={'slug': m.slug}))
self.assertRedirects(response,
reverse('login') + "?next=" + \
reverse('methods-edit-method',
kwargs={'slug': m.slug}),
status_code=302,
target_status_code=200)
class MethodAuthorizedTests(TestCase):
fixtures = ['users.json',
'methods.json']
def setUp(self):
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
def tearDown(self):
self.client.logout()
def test_index(self):
""" Check that the index page renders when logged in. """
response = self.client.get(reverse('methods-index'))
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(response.context['user'].username, 'testclient')
def test_method_views(self):
""" Make sure all the views render when logged in. """
methods = Method.objects.exclude(status='DRAFT')
for method in methods:
p_at = method.published_at
response = self.client.get(reverse('methods-show-method',
kwargs={'year': p_at.year,
'month': p_at.month,
'day': p_at.day,
'slug': method.slug}))
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(response.context['user'].username,
'testclient')
def test_create_method_view(self):
""" Make sure the create method view renders when logged in. """
response = self.client.get(reverse('methods-create-method'))
self.failUnlessEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_publish_method(self):
method = Method.objects.filter(status='DRAFT')[0]
response = self.client.post(reverse('methods-edit-method',
kwargs={'slug': method.slug}),
{'publish': "Publish"})
self.assertRedirects(response,
reverse('methods-index'),
status_code=302,
target_status_code=200)
def test_diff_method(self):
method = Method.objects.filter(status='PUBLISHED')[0]
# Trying to show diff without older revision should fail
p_at = method.published_at
response = self.client.get(reverse('methods-diff-method',
kwargs={'year': p_at.year,
'month': p_at.month,
'day': p_at.day,
'slug': method.slug}),
{'diff1': 'latest',
'diff2': 1})
self.failUnlessEqual(response.status_code, 404)
# Edit the method, thus creating a revision history entry
response = self.client.post(reverse('methods-edit-method',
kwargs={'slug': method.slug}),
{'method': "Update",
'method-title': "Edited method",
'method-description': "Some text",
'method-editor_comment':
"Updated title"})
self.assertRedirects(response,
reverse('methods-index'),
status_code=302,
target_status_code=200)
# Show diff
response = self.client.get(reverse('methods-diff-method',
kwargs={'year': p_at.year,
'month': p_at.month,
'day': p_at.day,
'slug': method.slug}),
{'diff1': 'latest',
'diff2': 1})
self.failUnlessEqual(response.status_code, 200)
class EditMethodLoggedInAsNonOwnerTests(TestCase):
fixtures = ['users.json',
'methods.json']
def setUp(self):
# Log in as 'user'
login = self.client.login(username='user', password='password')
self.failUnless(login, 'Could not log in')
def test_edit_view_when_method_not_owned_by_logged_in_user(self):
"""
Make sure the edit method view renders when logged in as
non-owner of a method.
"""
# Try to render edit view for method created by another user
m = Method.objects.get(id=2)
response = self.client.get(reverse('methods-edit-method',
kwargs={'slug': m.slug}))
self.failUnlessEqual(response.status_code, 200)
def test_edit_method_owned_by_other_user(self):
"""
Make sure the edit method form accepts input from a user
that didn't create the method.
"""
# Try to post update to own method
m = Method.objects.get(id=2)
response = self.client.post(reverse('methods-edit-method',
kwargs={'slug': m.slug}),
{'method': "Update",
'method-title': "Hello",
'method-description': "Hello World",
'method-editor_comment':
"Updated title"})
self.assertRedirects(response,
reverse('methods-index'),
status_code=302,
target_status_code=200)
m = Method.objects.get(id=2)
self.failUnlessEqual(m.title, "Hello")
class EditMethodTests(TestCase):
fixtures = ['users.json',
'methods.json']
def setUp(self):
# Log in as 'user'
login = self.client.login(username='user', password='password')
self.failUnless(login, 'Could not log in')
def test_edit_method_view(self):
"""
Make sure the edit method view renders when logged in as
the user who created the method.
"""
# Should be able to render edit view for own method
m = Method.objects.get(id=1)
response = self.client.get(reverse('methods-edit-method',
kwargs={'slug': m.slug}))
self.failUnlessEqual(response.status_code, 200)
def test_edit_method_by_form(self):
""" Make sure the edit method form works. """
# Try to post update to own method
m = Method.objects.get(id=1)
response = self.client.post(reverse('methods-edit-method',
kwargs={'slug': m.slug}),
{'method': "Update",
'method-title': "Hello",
'method-description': "Hello World",
'method-editor_comment':
"Updated title"})
self.assertRedirects(response,
reverse('methods-index'),
status_code=302,
target_status_code=200)
m = Method.objects.get(id=1)
self.failUnlessEqual(m.title, "Hello")
# def test_delete_method_by_form(self):
# """ Delete own method. """
#
# m = Method.objects.get(id=1)
# comments = Comment...
# self.failUnless(comments)
#
# response = self.client.post(reverse('molnet-polls-edit-poll',
# kwargs={'slug': p.slug}),
# {'delete': "Delete"})
# self.assertRedirects(response, reverse('molnet-polls-startpage'))
# self.assertRaises(ObjectDoesNotExist, Poll.objects.get, id=p.id)
#
# # Verify that choices have been cascade deleted
# choices_post = Choice.objects.filter(poll=p.id) \
# .values_list('id', flat=True)
# self.failIf(choices_post)
#
# # All votes linked to the original choices should have been
# # cascade deleted
# votes_post = Vote.objects.filter(choice__in=choices)
# self.failIf(votes_post)
def test_publish_method_by_form(self):
""" Publish own method. """
m = Method.objects.filter(user__username='user') \
.filter(status='DRAFT')[0]
response = self.client.post(reverse('methods-edit-method',
kwargs={'slug': m.slug}),
{'publish': "Publish"})
self.assertRedirects(response,
reverse('methods-index'),
status_code=302,
target_status_code=200)
m2 = Method.objects.get(id=m.id)
self.failUnless(m2.is_published())
self.failIf(m2.is_draft())
def test_publish_already_published_method_by_form(self):
""" Try to publish an already published method. """
m = Method.objects.filter(user__username='user') \
.filter(status='PUBLISHED')[0]
response = self.client.post(reverse('methods-edit-method',
kwargs={'slug': m.slug}),
{'publish': "Publish"})
self.assertRedirects(response,
reverse('methods-index'),
status_code=302,
target_status_code=200)
m2 = Method.objects.get(id=m.id)
self.failUnless(m2.is_published())
|
|
"""This file implements a transformer-based model to detect intents and the
corresponding slots for a given query.
The model consists of the transformer encoder layer that computes the
contextual embedding of the input tokens. The slot corresponding to
a token is predicted by passing the contextual embeddings into a linear
layer. Similarly, the intent is predicted by passing a concatenation of
all the contextual embeddings through another linear layer. Most of the
code in this file was taken from
https://www.tensorflow.org/tutorials/text/transformer.
Typical usage example:
model = Net(num_layers=3, d_model=128, num_heads=8, dff=512, \
input_vocab_size=800, intent_vocab_size=50, slot_vocab_size=100, \
pe_input=64, max_seq_len=max_seq_len)
"""
import tensorflow as tf
import numpy as np
def get_angles(pos, i, d_model):
"""Computes the angles for all the input positions which will then be used
to compute the positional encodings.
Args:
pos: a numpy array of the position indices
i: a numpy array of the embedding indexes
d_model: the dimensionality of the embeddings
Returns:
angles
"""
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
"""Computes the positional encodings for all the input positions. These
embeddings are a function of the embedding dimension.
Args:
        position: the maximum index up to which the positional encoding is to be computed
d_model: the dimensionality of the embeddings
Returns:
pos_encoding
"""
angle_rads = get_angles(
np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :], d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
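# The function above implements the standard sinusoidal encoding,
#     PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#     PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model)),
# and returns the table with a leading batch axis; for example (illustrative
# values only), positional_encoding(50, 128) has shape (1, 50, 128).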
def scaled_dot_product_attention(query, key, value, mask):
"""Calculates the attention weights.
query, key, value must have matching leading dimensions.
key, value must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
query: query shape == (..., seq_len_q, depth)
key: key shape == (..., seq_len_k, depth)
value: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(query, key,
transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dkey = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dkey)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits,
axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, value) # (..., seq_len_q, depth_v)
return output, attention_weights
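# The function above computes standard scaled dot-product attention,
#     Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k) + mask * -1e9) V,
# where the mask term is only added when a mask is given. For example
# (illustrative shapes only), query/key/value of shape
# (batch, num_heads, seq_len, depth) give an output of the same shape and
# attention weights of shape (batch, num_heads, seq_len, seq_len).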
class MultiHeadAttention(tf.keras.layers.Layer):
"""Defines the Multi-Headed Attention layer.
This class defines the tensorflow layers and helper function required
to perform multi-headed attention.
Attributes:
        num_heads: The number of heads over which attention is computed.
d_model: The dimensionality of the contextual embeddings.
"""
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = self.d_model // self.num_heads
self.wquery = tf.keras.layers.Dense(self.d_model)
self.wkey = tf.keras.layers.Dense(self.d_model)
self.wvalue = tf.keras.layers.Dense(self.d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, combined_input, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
split_output = tf.reshape(combined_input,
(batch_size, -1, self.num_heads, self.depth))
return tf.transpose(split_output, perm=[0, 2, 1, 3])
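    # Shape illustration (values chosen for the example only): with
    # batch_size=2, seq_len=10, d_model=128 and num_heads=8, an input of shape
    # (2, 10, 128) is reshaped to (2, 10, 8, 16) and transposed to
    # (2, 8, 10, 16), i.e. one (seq_len, depth) slice per attention head.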
def call(self, value, key, query, mask):
"""Forward pass for the Multi-Head Attention layer.
"""
batch_size = tf.shape(query)[0]
query = self.wquery(query) # (batch_size, seq_len, d_model)
key = self.wkey(key) # (batch_size, seq_len, d_model)
value = self.wvalue(value) # (batch_size, seq_len, d_model)
query = self.split_heads(
query, batch_size) # (batch_size, num_heads, seq_len_q, depth)
key = self.split_heads(
key, batch_size) # (batch_size, num_heads, seq_len_k, depth)
value = self.split_heads(
value, batch_size) # (batch_size, num_heads, seq_len_v, depth)
scaled_attention, attention_weights = scaled_dot_product_attention(
query, key, value, mask)
scaled_attention = tf.transpose(
scaled_attention,
perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(
scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(
concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
"""Defines the feed forward component of the transformer.
    The feed forward component consists of two dense layers.
Args:
d_model: The dimensionality of the embeddings.
dff: The output dimension of the first dense layer.
Returns:
keras layers
"""
return tf.keras.Sequential([
tf.keras.layers.Dense(dff,
activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
])
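# The helper above builds the position-wise feed-forward block
#     FFN(x) = max(0, x W1 + b1) W2 + b2,
# applied independently at every position: (batch, seq_len, d_model)
# -> (batch, seq_len, dff) -> (batch, seq_len, d_model).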
class EncoderLayer(tf.keras.layers.Layer):
"""Defines the Encoder layer of the transformer.
This class defines the tensorflow layers and the forward pass for the
transformer layer.
Attributes:
d_model: The dimensionality of the contextual embeddings.
        num_heads: The number of heads over which attention is computed.
dff: The hidden dimension of the feed forward component of the
transformer.
rate: The dropout rate to be used.
"""
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, inputs, training, mask):
"""Forward pass for the Encoder layer.
"""
attn_output, _ = self.mha(inputs, inputs, inputs,
mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(
inputs + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(
out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class Encoder(tf.keras.layers.Layer):
"""Defines the Encoder component of the transformer.
The encoder of a transformer consists of multiple encoder layers
stacked on top of each other. This class defines the tensorflow
layers and the forward pass for the encoder.
Attributes:
d_model: The dimensionality of the contextual embeddings.
        num_heads: The number of heads over which attention is computed.
dff: The hidden dimension of the feed forward component of the
transformer.
input_vocab_size: The size of the input vocabulary.
        maximum_position_encoding: The maximum number of input positions.
rate: The dropout rate to be used.
"""
def __init__(self,
num_layers,
d_model,
num_heads,
dff,
input_vocab_size,
maximum_position_encoding,
rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding,
self.d_model)
self.enc_layers = [
EncoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)
]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, inputs, training, mask):
"""Forward pass for the Encoder.
"""
seq_len = tf.shape(inputs)[1]
# adding embedding and position encoding.
inputs = self.embedding(inputs) # (batch_size, input_seq_len, d_model)
inputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
inputs += self.pos_encoding[:, :seq_len, :]
output = self.dropout(inputs, training=training)
for i in range(self.num_layers):
output = self.enc_layers[i](output, training, mask)
return output # (batch_size, input_seq_len, d_model)
class SlotHead(tf.keras.layers.Layer):
"""Defines the SlotHead which computes the slot for each input token.
The slot head uses a linear layer on top of the contextual embeddings
from the transformer encoder to predict the slots for each input token.
Attributes:
slot_vocab_size: The size of the slot vocabulary.
"""
def __init__(self, slot_vocab_size):
super(SlotHead, self).__init__()
self.slot_layer = tf.keras.layers.Dense(slot_vocab_size)
def call(self, inputs):
"""Forward pass for the Slot Head.
"""
out = self.slot_layer(inputs)
return out # (batch_size, input_seq_len, slot_vocab_size)
class IntentHead(tf.keras.layers.Layer):
"""Defines the IntentHead which computes the intent of the given input.
The intent head uses a linear layer on the concatenation of all the
contextual embeddings for the given input to predict the intent.
Attributes:
intent_vocab_size: The size of the intent vocabulary.
d_model: The dimensionality of the embeddings.
seq_len: The sequence length of the inputs to the transformer.
"""
def __init__(self, intent_vocab_size, d_model, seq_len):
super(IntentHead, self).__init__()
self.d_model = d_model
self.seq_len = seq_len
self.intent_layer = tf.keras.layers.Dense(intent_vocab_size)
def call(self, inputs, mask):
"""Forward pass for the Intent head.
"""
batch_size = tf.shape(inputs)[0]
#Apply masking to remove padding embeddings
inputs = tf.multiply(inputs, mask)
inputs = tf.reshape(
inputs, (batch_size, self.seq_len *
self.d_model)) # (batch_size, input_seq_len*d_model)
out = self.intent_layer(inputs)
return out # (batch_size, intent_vocab_size)
class Net(tf.keras.Model):
"""Defines the transformer based network to predict slots and intents.
    The network consists of the transformer encoder, on top of which linear
layers are applied to predict the slots and intents.
Attributes:
num_layers: The number of transformer encoder layers.
d_model: The dimensionality of the contextual embeddings.
        num_heads: The number of heads over which attention is computed.
dff: The hidden dimension of the feed forward component of the
transformer.
input_vocab_size: The size of the input vocabulary.
slot_vocab_size: The size of the slot vocabulary.
intent_vocab_size: The size of the intent vocabulary.
pe_input: The maximum number of positional embeddings needed.
        max_seq_len: The maximum sequence length of the input.
rate: The dropout rate to be used.
"""
def __init__(self,
num_layers,
d_model,
num_heads,
dff,
input_vocab_size,
slot_vocab_size,
intent_vocab_size,
pe_input,
max_seq_len=46,
rate=0.1):
super(Net, self).__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff,
input_vocab_size, pe_input, rate)
self.intent_head = IntentHead(intent_vocab_size, d_model, max_seq_len)
self.slot_head = SlotHead(slot_vocab_size)
def call(self, inputs, training, enc_padding_mask, intent_mask):
"""Forward pass for the entire model.
"""
enc_output = self.encoder(
inputs, training,
enc_padding_mask) # (batch_size, inp_seq_len, d_model)
slot_output = self.slot_head(
enc_output) # (batch_size, tar_seq_len, slot_vocab_size)
intent_output = self.intent_head(
enc_output,
            intent_mask)  # (batch_size, intent_vocab_size)
return slot_output, intent_output
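# --- Hedged usage sketch (illustration only; the helper below and all values
# are assumptions, not definitions from this module) ---
# The encoder expects a padding mask of shape (batch, 1, 1, seq_len) with 1.0
# at padded positions (it is multiplied by -1e9 inside the attention), and the
# intent head expects a mask broadcastable against (batch, seq_len, d_model)
# with 1.0 at real tokens so padded embeddings are zeroed out. Assuming the
# padding token id is 0:
#
#   def create_padding_mask(token_ids):
#       mask = tf.cast(tf.math.equal(token_ids, 0), tf.float32)
#       return mask[:, tf.newaxis, tf.newaxis, :]  # (batch, 1, 1, seq_len)
#
#   model = Net(num_layers=3, d_model=128, num_heads=8, dff=512,
#               input_vocab_size=800, slot_vocab_size=100, intent_vocab_size=50,
#               pe_input=64, max_seq_len=46)
#   slot_logits, intent_logits = model(token_ids, training=False,
#                                      enc_padding_mask=create_padding_mask(token_ids),
#                                      intent_mask=intent_mask)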
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
import sys
import os
if __name__ == '__main__':
#Dynamically append current script path to PYTHONPATH
sys.path.append(os.path.dirname(sys.argv[0]))
import SchemaDef2 as sd
from CsCommonGen2 import *
from CsNativeFormatGen2 import *
from odict import odict
class WriterGen(object):
#==========================================================================================================
def __init__(self):
self.nsReader = 'Internal.Metadata.NativeFormat'
self.Ty = TypeContainer()
PublishWellKnownTypes(self.Ty)
self.Ty.MetadataRecord = StructDef('MetadataRecord', flags = AccessFlags.Public | TypeFlags.Partial)
self.Ty.Handle = self.Ty.MetadataRecord
self.Ty.NativeWriter = ClassDef('NativeWriter')
self.records = odict([(rName, self.CreateRecord(rName, rMembers)) for (rName,rMembers) in sd.recordSchema.iteritems()])
#==========================================================================================================
def IsRecord(self, t):
return isinstance(t, TypeDef) and str(t) in sd.recordSchema.iterkeys()
#==========================================================================================================
def GetOrCreateType(self, ty, flags):
if type(ty) == tuple:
if not flags.IsRef(): raise Exception('Unexpected ty type.')
ty = self.Ty.Handle
if flags.IsArray():
return self.Ty.get(ty + '[]', TypeRef(
name = ty + '[]',
flags = TypeFlags.Public | TypeFlags.Array,
underlyingType = self.GetOrCreateType(ty, flags ^ sd.MemberDefFlags.Array)))
if type(ty) == str:
if ty == 'Handle':
ty = 'MetadataRecord'
if ty == 'Handle[]':
ty = 'MetadataRecord[]'
ty = self.Ty[ty]
if flags.IsCollection() and not flags.IsRef() and not flags.IsArray():
            raise Exception('Unexpected collection element type "{}" (flags = {}).'.format(type(ty), flags))
if flags.IsMap():
# return self.Ty[TypeInst(self.Ty.Dictionary, self.Ty.string, ty)]
return self.Ty[TypeInst(self.Ty.List, ty)]
elif flags.IsList():
return self.Ty[TypeInst(self.Ty.List, ty)]
else:
return ty
#==========================================================================================================
def ProcessRecordMemberForVisitor(self, rec, mName, mType, flags):
if flags.IsRef():
if flags.IsMap():
if flags.IsChild():
# rec.onVisit.body += '\n{0} = visitor.Visit(this, {0}.AsEnumerable());'.format(mName)
rec.onVisit.body += '\n{0} = visitor.Visit(this, {0}.AsEnumerable());'.format(mName)
else:
# rec.onVisit.body += '\nforeach (var key in {0}.Keys)\n {0}[key] = visitor.Visit(this, {0}[key]);'.format(mName)
rec.onVisit.body += '\n{0} = {0}.Select(value => visitor.Visit(this, value)).ToList();'.format(mName)
elif flags.IsSequence():
if flags.IsChild():
rec.onVisit.body += '\n{0} = visitor.Visit(this, {0}.AsEnumerable());'.format(mName)
else:
rec.onVisit.body += '\n{0} = {0}.Select(value => visitor.Visit(this, value)).ToList();'.format(mName)
else:
if flags.IsChild():
rec.onVisit.body += '\n{0} = visitor.Visit(this, {0}.AsSingleEnumerable()).FirstOrDefault();'.format(mName)
else:
rec.onVisit.body += '\n{0} = visitor.Visit(this, {0});'.format(mName)
#==========================================================================================================
def ProcessRecordMember(self, rec, mName, mType, flags):
members = list()
if flags.IsNotPersisted():
return members
mTypeSet = mType if type(mType) == tuple else (mType,)
mType = self.GetOrCreateType(mType, flags)
mType.schemaFlags = flags
field = FieldDef(mName, mType, flags = AccessFlags.Public | MemberFlags.Serialize)
field.schemaFlags = flags
members.append(field)
if flags.IsCollection() and not flags.IsArray():
field.autoInitialize = True
if (flags.IsCompare() or not rec.defFlags.IsCustomCompare()):
if flags.IsCollection():
rec.mEquals.body += '\nif (!{0}.SequenceEqual(other.{0})) return false;'.format(field)
elif str(field.fieldType) in sd.primitiveTypes or str(field.fieldType) in sd.enumTypes:
rec.mEquals.body += '\nif ({0} != other.{0}) return false;'.format(field)
else:
rec.mEquals.body += '\nif (!Object.Equals({0}, other.{0})) return false;'.format(field)
# Being very selective here to prevent reentrancy in GetHashCode.
if str(field.fieldType) in sd.stringRecordTypes:
rec.mHashCode.body += '\nhash = ((hash << 13) - (hash >> 19)) ^ ({0} == null ? 0 : {0}.GetHashCode());'.format(field)
elif str(field.fieldType) == 'string':
rec.mHashCode.body += '\nhash = ((hash << 13) - (hash >> 19)) ^ ({0} == null ? 0 : {0}.GetHashCode());'.format(field)
elif str(field.fieldType) in sd.primitiveTypes or str(field.fieldType) in sd.enumTypes:
rec.mHashCode.body += '\nhash = ((hash << 13) - (hash >> 19)) ^ {0}.GetHashCode();'.format(field)
elif flags.IsArray() and (str(field.fieldType.underlyingType) in sd.primitiveTypes):
rec.mHashCode.body += '''
if ({0} != null)
{{
for (int i = 0; i < {0}.Length; i++)
{{
hash = ((hash << 13) - (hash >> 19)) ^ {0}[i].GetHashCode();
}}
}}'''.format(field)
elif flags.IsList() and flags.IsEnumerateForHashCode():
rec.mHashCode.body += '''
if ({0} != null)
{{
for (int i = 0; i < {0}.Count; i++)
{{
hash = ((hash << 13) - (hash >> 19)) ^ ({0}[i] == null ? 0 : {0}[i].GetHashCode());
}}
}}'''.format(field)
elif not flags.IsCollection():
rec.mHashCode.body += '\nhash = ((hash << 13) - (hash >> 19)) ^ ({0} == null ? 0 : {0}.GetHashCode());'.format(field)
if flags.IsRef() and len(mTypeSet) > 1:
valueName = str(field)
rec.recordEmit.body += '\nDebug.Assert('
if flags.IsSequence():
rec.recordEmit.body += '{}.TrueForAll(handle => '.format(field)
valueName = 'handle'
rec.recordEmit.body += ' ||\n '.join(['{} == null'.format(valueName)] + ['{}.HandleType == HandleType.{}'.format(valueName, ty) for ty in mTypeSet])
if flags.IsSequence():
rec.recordEmit.body += ')'
rec.recordEmit.body += ');'
if flags.IsRef() and flags.IsMap():
# rec.recordEmit.body += '\nwriter.Write({}.Values);'.format(field)
rec.recordEmit.body += '\nwriter.Write({0});'.format(field)
else:
rec.recordEmit.body += '\nwriter.Write({0});'.format(field)
# if mName == "CustomAttributes":
# rec.interfaces += [self.Ty.ICustomAttributeMetadataRecord]
# members.append(MethodDef(
# 'ICustomAttributeMetadataRecord.GetCustomAttributes',
# flags = MemberFlags(0),
# sig = [TypeInst(self.Ty.IListT, self.Ty.CustomAttribute), []],
# body = 'return CustomAttributes;'))
return members
#==========================================================================================================
def CreateRecord(self, name, members):
flags = AccessFlags.Public | TypeFlags.Partial | TypeFlags.Struct
recordDef = sd.recordDefs[name]
rec = ClassDef(name, self.Ty.get(recordDef.baseTypeName or 'MetadataRecord'), flags)
rec.enumType = self.Ty.HandleType
rec.enumValue = str(rec)
rec.handle = self.Ty['{}Handle'.format(rec)]
rec.defFlags = recordDef.flags
self.Ty[name] = rec
rec.ctor = CtorDef(body = '')
rec.members.add(rec.ctor)
rec.members.add(PropertyDef(
'HandleType',
self.Ty.HandleType,
flags = AccessFlags.Public | MemberFlags.Override,
getter = PropertyGetter(
body = 'return HandleType.{0};'.format(str(rec)))))
rec.onVisit = rec.members.add(MethodDef(
'Visit',
flags = AccessFlags.Internal | MemberFlags.Override,
sig = [self.Ty.void, [(self.Ty.IRecordVisitor, 'visitor')]],
body = ''))
rec.mEquals = rec.members.add(MethodDef(
'Equals',
flags = AccessFlags.Public | MemberFlags.Override | MemberFlags.Sealed,
sig = [self.Ty.bool, [(self.Ty.Object, 'obj')]],
body = '''
if (Object.ReferenceEquals(this, obj)) return true;
var other = obj as {0};
if (other == null) return false;'''.format(rec)))
if rec.defFlags.IsReentrantEquals():
rec.members.add(FieldDef(
'_equalsReentrancyGuard',
fieldType = self.Ty[TypeInst(self.Ty.ThreadLocal, self.Ty['ReentrancyGuardStack'])],
flags = AccessFlags.Private,
autoInitialize = False))
rec.mEquals.body += '''
if (_equalsReentrancyGuard.Value.Contains(other))
return true;
_equalsReentrancyGuard.Value.Push(other);
try
{'''
rec.ctor.body += '_equalsReentrancyGuard = new ThreadLocal<ReentrancyGuardStack>(() => new ReentrancyGuardStack());'
rec.mHashCode = rec.members.add(MethodDef(
'GetHashCode',
flags = AccessFlags.Public | MemberFlags.Override | MemberFlags.Sealed,
sig = [self.Ty.int, []],
body = '''
if (_hash != 0)
return _hash;
EnterGetHashCode();
int hash = {};'''.format(hash(str(rec)))))
rec.recordEmit = MethodDef(
'Save',
flags = AccessFlags.Internal | MemberFlags.Override,
sig = [self.Ty.void, [(self.Ty.NativeWriter, 'writer')]],
body = '''
''')
rec.members.add(rec.recordEmit)
rec.members.add(MethodDef(
'AsHandle'.format(rec),
sig = [rec.handle, [(rec, 'record')]],
flags = AccessFlags.Internal | MemberFlags.Static,
body = '''
if (record == null)
{{
return new {0}(0);
}}
else
{{
return record.Handle;
}}
'''.format(rec.handle, rec.enumType, rec.enumValue)))
# String records with a null Value property are translated to null handle values so that
# we can tell the difference between the empty and null string values.
if str(rec) in sd.stringRecordTypes:
rec.recordEmit.body = 'if (Value == null)\n return;\n' + rec.recordEmit.body
rec.members.add(PropertyDef(
'Handle'.format(rec),
'{}Handle'.format(rec),
flags = AccessFlags.Internal | MemberFlags.New,
getter = PropertyGetter(body = '''
if (Value == null)
return new {0}Handle(0);
else
return new {0}Handle(HandleOffset);
'''.format(rec, rec.enumType, rec.enumValue))))
else:
rec.members.add(PropertyDef(
'Handle'.format(rec),
'{}Handle'.format(rec),
flags = AccessFlags.Internal | MemberFlags.New,
getter = PropertyGetter(body = '''
return new {0}Handle(HandleOffset);
'''.format(rec, rec.enumType, rec.enumValue))))
for m in members:
rec.members += self.ProcessRecordMember(rec, *m)
for m in sorted(members, lambda (m1,t1,f1),(m2,t2,f2): cmp(not f1.IsChild(), not f2.IsChild())):
self.ProcessRecordMemberForVisitor(rec, *m)
if rec.defFlags.IsReentrantEquals():
rec.mEquals.body += '''
}
finally
{
var popped = _equalsReentrancyGuard.Value.Pop();
Debug.Assert(Object.ReferenceEquals(other, popped));
}'''
rec.mEquals.body += '\nreturn true;'
rec.mHashCode.body += '\nLeaveGetHashCode();\n_hash = hash;\nreturn _hash;'
return rec
#==========================================================================================================
def CreateWriterMembers(self, reader, recs):
members = list()
# for rec in recs:
# field = FieldDef(
# CsMakePrivateName(Plural(str(rec))),
# self.Ty[TypeInst(self.Ty.List, rec)],
# flags = AccessFlags.Internal,
# autoInitialize = True)
# members.append(field)
# members.append(MethodDef(
# 'AddRecord',
# flags = AccessFlags.Internal,
# sig = [self.Ty.void, [(rec, 'record')]],
# body = '{0}.Add(record);'.format(field)))
return members
#==========================================================================================================
def CreateWriter(self, name, recs):
writer = ClassDef(name, flags = AccessFlags.Public | TypeFlags.Partial)
writer.members += self.CreateWriterMembers(writer, recs)
return writer
#==========================================================================================================
def CreateBinaryWriterExtensionMethod(self, tDecl, tReal = None, body = None, argName = 'value'):
tReal = tReal or tDecl
body = body or 'writer.Write(({}){});'.format(tReal, argName)
return MethodDef(
'Write',
sig = [self.Ty.void, [(self.Ty.MdBinaryWriter, 'writer'), (tDecl, argName)]],
flags = AccessFlags.Internal | MemberFlags.Extension,
body = body)
#==========================================================================================================
def CreateDictionaryExtensionClass(self, recs):
cl = ClassDef(
'DictionaryExtensions',
flags = AccessFlags.Public | TypeFlags.Static | TypeFlags.Partial)
for rec in recs.itervalues():
if len(filter(lambda field: isinstance(field, FieldDef) and str(field) == 'Name', rec.members)) > 0:
cl.members.add(MethodDef(
'Add',
flags = AccessFlags.Public | MemberFlags.Extension,
sig = [self.Ty.void, [(TypeInst(self.Ty.Dictionary, self.Ty.string, rec), 'dict'), (rec, 'record')]],
body = 'dict.Add(record.Name.Value ?? "<null>", record);'))
return cl
#==========================================================================================================
# def CreateVisitorInterface(self):
# itf = InterfaceDef(
# 'IRecordVisitor',
# flags = AccessFlags.Internal | TypeFlags.Partial)
# for item in itertools.chain(
# self.records.itervalues(),
# map(lambda k: self.Ty[k], sd.primitiveTypes.iterkeys()),
# map(lambda e: self.Ty[e], sd.enumTypes.iterkeys())):
# itf.members.add(MethodDef(
# 'Visit',
# sig = [self.Ty.void, [(item, 'item')]]))
# return itf
#==========================================================================================================
# def CreateVisitorEnumerate(self):
# cls = ClassDef(
# 'IRecordVisitor',
# flags = AccessFlags.Internal | TypeFlags.Partial)
# for item in itertools.chain(
# self.records.itervalues(),
# map(lambda k: self.Ty[k], sd.primitiveTypes.iterkeys()),
# map(lambda e: self.Ty[e], sd.enumTypes.iterkeys())):
# cls.members.add(MethodDef(
# 'Visit',
# sig = [self.Ty.void, [(item, 'item')]]))
# return cls
#==========================================================================================================
def CsEmitSource(self):
ns = NamespaceDef('Internal.Metadata.NativeFormat.Writer')
ns.members += self.records.values()
writer = self.CreateWriter('MetadataWriter', self.records.values())
ns.members.add(writer)
# ns.members.add(self.CreateDictionaryExtensionClass(self.records))
# ns.members.add(self.CreateVisitorInterface())
# for hnd in filter(lambda h: getattr(h, 'record', None), hnds.values()):
# ns.members.add(CreateHandleEnumerator(reader, hnd))
# ns.members.add(CreateHandleEnumerable(reader, hnd))
# Source NativeFormatReaderGen.cs
with open(r'..\..\..\..\..\ILCompiler.MetadataWriter\src\Internal\Metadata\NativeFormat\Writer\NativeFormatWriterGen.cs', 'w') as output :
iprint = IPrint(output)
CsEmitFileHeader(iprint)
iprint('#pragma warning disable 649')
iprint()
iprint('using System;')
iprint('using System.Linq;')
iprint('using System.IO;')
iprint('using System.Collections.Generic;')
iprint('using System.Reflection;')
iprint('using System.Threading;')
iprint('using Internal.Metadata.NativeFormat.Writer;')
iprint('using Internal.NativeFormat;')
iprint('using HandleType = Internal.Metadata.NativeFormat.HandleType;')
iprint('using Debug = System.Diagnostics.Debug;')
iprint()
ns.CsDefine(iprint)
#==========================================================================================================
if __name__ == '__main__':
WriterGen().CsEmitSource()
|
|
"""Test the module cluster centroids."""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from collections import Counter
from imblearn.under_sampling import ClusterCentroids
# Generate a global dataset to use
RND_SEED = 0
# Data generated for the toy example
X = np.array([[0.04352327, -0.20515826],
[0.92923648, 0.76103773],
[0.20792588, 1.49407907],
[0.47104475, 0.44386323],
[0.22950086, 0.33367433],
[0.15490546, 0.3130677],
[0.09125309, -0.85409574],
[0.12372842, 0.6536186],
[0.13347175, 0.12167502],
[0.094035, -2.55298982]])
Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
def test_cc_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(ClusterCentroids)
def test_cc_bad_ratio():
"""Test either if an error is raised with a wrong decimal value for
the ratio"""
# Define a negative ratio
ratio = -1.0
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
# Define a ratio greater than 1
ratio = 100.0
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
# Define ratio as an unknown string
ratio = 'rnd'
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
# Define ratio as a list which is not supported
ratio = [.5, .5]
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
def test_init():
"""Test the initialisation of the object"""
# Define a ratio
ratio = 1.
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_equal(cc.ratio, ratio)
def test_cc_fit_single_class():
"""Test either if an error when there is a single class"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, cc.fit, X, y_single_class)
def test_cc_fit_invalid_ratio():
"""Test either if an error is raised when the balancing ratio to fit is
smaller than the one of the data"""
# Create the object
ratio = 1. / 10000.
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit the data
assert_raises(RuntimeError, cc.fit, X, Y)
def test_cc_fit():
"""Test the fitting method"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit the data
cc.fit(X, Y)
# Check if the data information have been computed
assert_equal(cc.min_c_, 0)
assert_equal(cc.maj_c_, 1)
assert_equal(cc.stats_c_[0], 3)
assert_equal(cc.stats_c_[1], 7)
def test_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
cc.fit(X, Y)
assert_raises(RuntimeError, cc.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
def test_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(RuntimeError, cc.sample, X, Y)
def test_fit_sample_auto():
"""Test fit and sample routines with auto ratio"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit and sample
X_resampled, y_resampled = cc.fit_sample(X, Y)
X_gt = np.array([[0.92923648, 0.76103773],
[0.47104475, 0.44386323],
[0.13347175, 0.12167502],
[0.06738818, -0.529627],
[0.17901516, 0.69860992],
[0.094035, -2.55298982]])
y_gt = np.array([0, 0, 0, 1, 1, 1])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_fit_sample_half():
"""Test fit and sample routines with ratio of .5"""
# Define the parameter for the under-sampling
ratio = .5
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit and sample
X_resampled, y_resampled = cc.fit_sample(X, Y)
X_gt = np.array([[0.92923648, 0.76103773],
[0.47104475, 0.44386323],
[0.13347175, 0.12167502],
[0.09125309, -0.85409574],
[0.19220316, 0.32337101],
[0.094035, -2.55298982],
[0.20792588, 1.49407907],
[0.04352327, -0.20515826],
[0.12372842, 0.6536186]])
y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_wrong_X_default_ratio():
    """Test whether an error is raised when X is different at fitting
    and sampling (default ratio)"""
# Create the object
cc = ClusterCentroids(random_state=RND_SEED)
cc.fit(X, Y)
assert_raises(RuntimeError, cc.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
def test_continuous_error():
"""Test either if an error is raised when the target are continuous
type"""
# continuous case
y = np.linspace(0, 1, 10)
cc = ClusterCentroids(random_state=RND_SEED)
assert_warns(UserWarning, cc.fit, X, y)
def test_multiclass_fit_sample():
"""Test fit sample method with multiclass target"""
# Make y to be multiclass
y = Y.copy()
y[5] = 2
y[6] = 2
# Resample the data
cc = ClusterCentroids(random_state=RND_SEED)
X_resampled, y_resampled = cc.fit_sample(X, y)
# Check the size of y
count_y_res = Counter(y_resampled)
assert_equal(count_y_res[0], 2)
assert_equal(count_y_res[1], 2)
assert_equal(count_y_res[2], 2)
|
|
# Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Union
from robot.api import logger
from robot.utils import NormalizedDict
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.event_firing_webdriver import EventFiringWebElement
from selenium.webdriver.common.by import By
from SeleniumLibrary.base import ContextAware
from SeleniumLibrary.errors import ElementNotFound
from SeleniumLibrary.utils import escape_xpath_value, events, is_falsy
from .customlocator import CustomLocator
class ElementFinder(ContextAware):
def __init__(self, ctx):
ContextAware.__init__(self, ctx)
strategies = {
"identifier": self._find_by_identifier,
"id": self._find_by_id,
"name": self._find_by_name,
"xpath": self._find_by_xpath,
"dom": self._find_by_dom,
"link": self._find_by_link_text,
"partial link": self._find_by_partial_link_text,
"css": self._find_by_css_selector,
"class": self._find_by_class_name,
"jquery": self._find_by_jquery_selector,
"sizzle": self._find_by_jquery_selector,
"tag": self._find_by_tag_name,
"scLocator": self._find_by_sc_locator,
"data": self._find_by_data_locator,
"default": self._find_by_default,
}
self._strategies = NormalizedDict(
initial=strategies, caseless=True, spaceless=True
)
self._default_strategies = list(strategies)
self._key_attrs = {
None: ["@id", "@name"],
"a": [
"@id",
"@name",
"@href",
"normalize-space(descendant-or-self::text())",
],
"img": ["@id", "@name", "@src", "@alt"],
"input": ["@id", "@name", "@value", "@src"],
"button": [
"@id",
"@name",
"@value",
"normalize-space(descendant-or-self::text())",
],
}
self._split_re = re.compile(
r" >> (?=identifier ?[:|=]|id ?[:|=]|name ?[:|=]|xpath ?[:|=]|dom ?[:|=]|link ?[:|=]|partial link ?[:|=]"
r"|css ?[:|=]|class ?[:|=]|jquery ?[:|=]|sizzle ?[:|=]|tag ?[:|=]|scLocator ?[:|=])",
re.IGNORECASE,
)
def find(
self,
locator: Union[str, list],
tag=None,
first_only=True,
required=True,
parent=None,
):
element = parent
locators = self._split_locator(locator)
for split_locator in locators[:-1]:
element = self._find(
split_locator, first_only=True, required=True, parent=element
)
return self._find(locators[-1], tag, first_only, required, element)
def _split_locator(self, locator: Union[str, list]) -> list:
if isinstance(locator, list):
return locator
if not isinstance(locator, str):
return [locator]
match = self._split_re.search(locator)
if not match:
return [locator]
parts = []
while match:
span = match.span()
parts.append(locator[: span[0]])
locator = locator[span[1] :]
match = self._split_re.search(locator)
parts.append(locator)
return parts
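    # Hedged illustration (the locator strings are examples, not from the
    # source): a chained locator such as "css:div#main >> id:child" is split on
    # the " >> " separator into ["css:div#main", "id:child"]; find() then
    # resolves every part except the last and uses it as the parent for the
    # next lookup.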
def _find(self, locator, tag=None, first_only=True, required=True, parent=None):
element_type = "Element" if not tag else tag.capitalize()
if parent and not self._is_webelement(parent):
raise ValueError(
f"Parent must be Selenium WebElement but it was {type(parent)}."
)
if self._is_webelement(locator):
return locator
prefix, criteria = self._parse_locator(locator)
strategy = self._strategies[prefix]
tag, constraints = self._get_tag_and_constraints(tag)
elements = strategy(criteria, tag, constraints, parent=parent or self.driver)
if required and not elements:
raise ElementNotFound(f"{element_type} with locator '{locator}' not found.")
if first_only:
if not elements:
return None
return elements[0]
return elements
def register(self, strategy_name, strategy_keyword, persist=False):
strategy = CustomLocator(self.ctx, strategy_name, strategy_keyword)
if strategy.name in self._strategies:
raise RuntimeError(
f"The custom locator '{strategy.name}' cannot be registered. "
"A locator of that name already exists."
)
self._strategies[strategy.name] = strategy.find
if is_falsy(persist):
# Unregister after current scope ends
events.on("scope_end", "current", self.unregister, strategy.name)
def unregister(self, strategy_name):
if strategy_name in self._default_strategies:
raise RuntimeError(
f"Cannot unregister the default strategy '{strategy_name}'."
)
if strategy_name not in self._strategies:
raise RuntimeError(
f"Cannot unregister the non-registered strategy '{strategy_name}'."
)
del self._strategies[strategy_name]
def _is_webelement(self, element):
# Hook for unit tests
return isinstance(element, (WebElement, EventFiringWebElement))
def _disallow_webelement_parent(self, element):
if self._is_webelement(element):
raise ValueError("This method does not allow WebElement as parent")
def _find_by_identifier(self, criteria, tag, constraints, parent):
elements = self._normalize(
parent.find_elements(By.ID, criteria)
) + self._normalize(parent.find_elements(By.NAME, criteria))
return self._filter_elements(elements, tag, constraints)
def _find_by_id(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.ID, criteria), tag, constraints
)
def _find_by_name(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.NAME, criteria), tag, constraints
)
def _find_by_xpath(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.XPATH, criteria), tag, constraints
)
def _find_by_dom(self, criteria, tag, constraints, parent):
self._disallow_webelement_parent(parent)
result = self.driver.execute_script(f"return {criteria};")
if result is None:
return []
if not isinstance(result, list):
result = [result]
return self._filter_elements(result, tag, constraints)
def _find_by_jquery_selector(self, criteria, tag, constraints, parent):
self._disallow_webelement_parent(parent)
criteria = criteria.replace("'", "\\'")
js = f"return jQuery('{criteria}').get();"
return self._filter_elements(self.driver.execute_script(js), tag, constraints)
def _find_by_link_text(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.LINK_TEXT, criteria), tag, constraints
)
def _find_by_partial_link_text(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.PARTIAL_LINK_TEXT, criteria), tag, constraints
)
def _find_by_css_selector(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.CSS_SELECTOR, criteria), tag, constraints
)
def _find_by_class_name(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.CLASS_NAME, criteria), tag, constraints
)
def _find_by_tag_name(self, criteria, tag, constraints, parent):
return self._filter_elements(
parent.find_elements(By.TAG_NAME, criteria), tag, constraints
)
def _find_by_data_locator(self, criteria, tag, constraints, parent):
try:
name, value = criteria.split(":", 2)
if "" in [name, value]:
raise ValueError
except ValueError:
raise ValueError(
f"Provided selector ({criteria}) is malformed. Correct format: name:value."
)
local_criteria = f'//*[@data-{name}="{value}"]'
return self._find_by_xpath(local_criteria, tag, constraints, parent)
def _find_by_sc_locator(self, criteria, tag, constraints, parent):
self._disallow_webelement_parent(parent)
criteria = criteria.replace("'", "\\'")
js = f"return isc.AutoTest.getElement('{criteria}')"
return self._filter_elements([self.driver.execute_script(js)], tag, constraints)
def _find_by_default(self, criteria, tag, constraints, parent):
if tag in self._key_attrs:
key_attrs = self._key_attrs[tag]
else:
key_attrs = self._key_attrs[None]
xpath_criteria = escape_xpath_value(criteria)
xpath_tag = tag if tag is not None else "*"
xpath_constraints = self._get_xpath_constraints(constraints)
xpath_searchers = [f"{attr}={xpath_criteria}" for attr in key_attrs]
xpath_searchers.extend(self._get_attrs_with_url(key_attrs, criteria))
xpath = (
f"//{xpath_tag}[{' and '.join(xpath_constraints)}"
f"{' and ' if xpath_constraints else ''}({' or '.join(xpath_searchers)})]"
)
return self._normalize(parent.find_elements(By.XPATH, xpath))
def _get_xpath_constraints(self, constraints):
xpath_constraints = [
self._get_xpath_constraint(name, value)
for name, value in constraints.items()
]
return xpath_constraints
def _get_xpath_constraint(self, name, value):
if isinstance(value, list):
value = "' or . = '".join(value)
return f"@{name}[. = '{value}']"
else:
return f"@{name}='{value}'"
def _get_tag_and_constraints(self, tag):
if tag is None:
return None, {}
tag = tag.lower()
constraints = {}
if tag == "link":
tag = "a"
if tag == "partial link":
tag = "a"
elif tag == "image":
tag = "img"
elif tag == "list":
tag = "select"
elif tag == "radio button":
tag = "input"
constraints["type"] = "radio"
elif tag == "checkbox":
tag = "input"
constraints["type"] = "checkbox"
elif tag == "text field":
tag = "input"
constraints["type"] = [
"date",
"datetime-local",
"email",
"month",
"number",
"password",
"search",
"tel",
"text",
"time",
"url",
"week",
"file",
]
elif tag == "file upload":
tag = "input"
constraints["type"] = "file"
elif tag == "text area":
tag = "textarea"
return tag, constraints
def _parse_locator(self, locator):
if re.match(r"\(*//", locator):
return "xpath", locator
index = self._get_locator_separator_index(locator)
if index != -1:
prefix = locator[:index].strip()
if prefix in self._strategies:
return prefix, locator[index + 1 :].lstrip()
return "default", locator
def _get_locator_separator_index(self, locator):
if "=" not in locator:
return locator.find(":")
if ":" not in locator:
return locator.find("=")
return min(locator.find("="), locator.find(":"))
def _element_matches(self, element, tag, constraints):
if not element.tag_name.lower() == tag:
return False
for name in constraints:
if isinstance(constraints[name], list):
if element.get_attribute(name) not in constraints[name]:
return False
elif element.get_attribute(name) != constraints[name]:
return False
return True
def _filter_elements(self, elements, tag, constraints):
elements = self._normalize(elements)
if tag is None:
return elements
return [
element
for element in elements
if self._element_matches(element, tag, constraints)
]
def _get_attrs_with_url(self, key_attrs, criteria):
attrs = []
url = None
xpath_url = None
for attr in ["@src", "@href"]:
if attr in key_attrs:
if url is None or xpath_url is None:
url = self._get_base_url() + "/" + criteria
xpath_url = escape_xpath_value(url)
attrs.append(f"{attr}={xpath_url}")
return attrs
def _get_base_url(self):
url = self.driver.current_url
if "/" in url:
url = "/".join(url.split("/")[:-1])
return url
def _normalize(self, elements):
        # Apparently IEDriver has returned invalid data earlier, and recently
        # ChromeDriver has sometimes returned None:
# https://github.com/SeleniumHQ/selenium/issues/4555
if not isinstance(elements, list):
logger.debug(f"WebDriver find returned {elements}")
return []
return elements
|
|
"""
An extension of the TransportCoefficients module for two-phase flow in porous media
.. inheritance-diagram:: proteus.TwophaseDarcyCoefficients
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
from math import *
from .TransportCoefficients import TC_base
import numpy
from .Profiling import logEvent
#base class for setting up fluid and material properties
class TwophaseDarcyFlow_base(TC_base):
def __init__(self,
g=9.8,
rhon=1.0,
rhow=0.0,
mun = 1.0,
muw = 1.0,
Ksw=1.0,
psk_model='VGM',
vg_alpha = 5.0,
vg_m = 0.75,
bc_pd = old_div(1.0,5.47),
bc_lambda = 0.5,
omega = 1.0,
Sw_max = 1.0,
Sw_min = 0.0,
density_w_model = 'Constant',
density_n_model = 'Constant'):
#set psk model and parameters
self.psk_model = psk_model
self.psk_types={'simp':0,
'VGM':1,
'VGB':2,
'BCM':3,
'BCB':4}
assert(self.psk_model in list(self.psk_types.keys()))
self.vg_alpha=vg_alpha
self.vg_m = vg_m
self.bc_pd = bc_pd
self.bc_lambda=bc_lambda
#normalize gravity
self.g = numpy.array(g,dtype='d')
gMag= sqrt(numpy.dot(self.g,self.g))
if gMag <= 0.0:
gMag = 1.0
self.g /= gMag
#set media properties
self.hasMaterialTypes=False
self.setParams=None
self.Ksw=Ksw
self.omega=omega
self.Sw_min = Sw_min
self.Sw_max = Sw_max
        #set fluid properties
self.b = old_div(rhon,rhow) #normalize density
self.rhon=old_div(rhon,rhon)
self.rhow=old_div(rhow,rhow)
self.muw=old_div(muw,muw)
self.mun=old_div(mun,muw)
#setup rwork arrays for homogeneous media, make heterogeneity later
if self.psk_model == 'simp':
self.len_rwork_psk=2
self.rwork_psk = numpy.array([self.Sw_min,self.Sw_max],dtype='d')
elif self.psk_model in ['VGM','VGB']:
self.len_rwork_psk=4
self.rwork_psk = numpy.array([self.Sw_min,self.Sw_max,self.vg_alpha,self.vg_m],dtype='d')
elif self.psk_model in ['BCM','BCB']:
self.len_rwork_psk=4
self.rwork_psk = numpy.array([self.Sw_min,self.Sw_max,bc_pd,bc_lambda],dtype='d')
#density eos options
self.density_w_model = density_w_model
self.density_n_model = density_n_model
self.density_types={'Constant':0,
'Exponential':1,
'IdealGas':2}
assert self.density_w_model in self.density_types
assert self.density_n_model in self.density_types
#default is Constant density, put in more general init later
self.rwork_density_w=numpy.array([self.rhow],dtype='d')
self.rwork_density_n=numpy.array([self.rhon],dtype='d')
#self.sd = sd
def setMaterialTypes(self,
Ksw_types=[1.0],
omega_types = [0.4],
Sw_max_types = [1.0],
Sw_min_types = [0.0],
bc_lambda_types = None,
bc_pd_types = None,
vg_alpha_types = None,
vg_m_types = None):
self.nTypesAvailable=len(Ksw_types)
self.hasMaterialTypes=True
self.Ksw_types = Ksw_types
self.omega_types= omega_types
self.setParams=None
if self.psk_model == 'simp':
self.rwork_psk = numpy.zeros((self.nTypesAvailable,2),'d')
for Sw_min,Sw_max,i in zip(Sw_min_types,Sw_max_types,list(range(self.nTypesAvailable))):
self.rwork_psk[i,0] = Sw_min
self.rwork_psk[i,1] = Sw_max
elif self.psk_model in ['VGM','VGB']:
assert(vg_alpha_types is not None and vg_m_types is not None)
self.rwork_psk = numpy.zeros((self.nTypesAvailable,4),'d')
for Sw_min,Sw_max,vg_alpha,vg_m,i in zip(Sw_min_types,Sw_max_types,vg_alpha_types,vg_m_types,list(range(self.nTypesAvailable))):
self.rwork_psk[i,0] = Sw_min
self.rwork_psk[i,1] = Sw_max
self.rwork_psk[i,2] = vg_alpha
self.rwork_psk[i,3] = vg_m
elif self.psk_model in ['BCM','BCB']:
            assert(bc_pd_types is not None and bc_lambda_types is not None)
self.rwork_psk = numpy.zeros((self.nTypesAvailable,4),'d')
for Sw_min,Sw_max,bc_pd,bc_lambda,i in zip(Sw_min_types,Sw_max_types,bc_pd_types,bc_lambda_types,list(range(self.nTypesAvailable))):
self.rwork_psk[i,0] = Sw_min
self.rwork_psk[i,1] = Sw_max
self.rwork_psk[i,2] = bc_pd
self.rwork_psk[i,3] = bc_lambda
def setMaterialFunction(self,setParams):
self.hasMaterialTypes=False
self.setParams=setParams
def initializeMesh(self,mesh):
if self.hasMaterialTypes:
self.elementMaterialTypes = mesh.elementMaterialTypes
#want element boundary material types for evaluating heterogeneity
#not boundary conditions
self.exteriorElementBoundaryTypes = numpy.zeros((mesh.nExteriorElementBoundaries_global),'i')
for ebNE in range(mesh.nExteriorElementBoundaries_global):
ebN = mesh.exteriorElementBoundariesArray[ebNE]
eN = mesh.elementBoundaryElementsArray[ebN,0]
self.exteriorElementBoundaryTypes[ebNE] = self.elementMaterialTypes[eN]
def initializeElementQuadrature(self,t,cq):
#mwf not sure if this is ok
cq['psi_n'] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',0)] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',1)] = numpy.zeros(cq[('u',0)].shape,'d')
if self.hasMaterialTypes:
self.materialTypes_q = self.elementMaterialTypes
self.q_shape = cq[('u',0)].shape
elif self.setParams is not None:
self.rwork_psk_q = numpy.zeros(cq[('u',0)].shape+(self.len_rwork_psk,),'d')
self.Ks_q = numpy.zeros(cq[('u',0)].shape,'d')
self.omega_q = numpy.zeros(cq[('u',0)].shape,'d')
            self.setParams(cq['x'],
                           self.rwork_psk_q,
                           self.Ks_q,
                           self.omega_q)
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
#mwf not sure if this is ok
if ('u',0) in cebq:
cebq['psi_n'] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',0)] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',1)] = numpy.zeros(cebq[('u',0)].shape,'d')
if ('u',0) in cebq_global:
cebq_global['psi_n'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',0)] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',1)] = numpy.zeros(cebq_global[('u',0)].shape,'d')
if self.hasMaterialTypes:
self.materialTypes_ebq = numpy.zeros(cebq[('u',0)].shape[0:2],'i')
self.ebq_shape = cebq[('u',0)].shape
for eN in range(self.elementMaterialTypes.shape[0]):
self.materialTypes_ebq[eN,:] = self.elementMaterialTypes[eN]
elif self.setParams is not None:
self.rwork_psk_ebq = numpy.zeros(cebq[('u',0)].shape+(self.len_rwork_psk,),'d')
self.Ks_ebq = numpy.zeros(cebq[('u',0)].shape,'d')
self.omega_ebq = numpy.zeros(cebq[('u',0)].shape,'d')
            self.setParams(cebq['x'],
                           self.rwork_psk_ebq,
                           self.Ks_ebq,
                           self.omega_ebq)
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
cebqe['psi_n'] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',0)] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',1)] = numpy.zeros(cebqe[('u',0)].shape,'d')
if self.hasMaterialTypes:
self.materialTypes_ebqe = numpy.zeros(cebqe[('u',0)].shape[0],'i')
self.ebqe_shape = cebqe[('u',0)].shape
for ebNE in range(self.exteriorElementBoundaryTypes.shape[0]):
self.materialTypes_ebqe[ebNE] = self.exteriorElementBoundaryTypes[ebNE]
elif self.setParams is not None:
self.rwork_psk_ebqe = numpy.zeros(cebqe[('u',0)].shape+(self.len_rwork_psk,),'d')
self.Ks_ebqe = numpy.zeros(cebqe[('u',0)].shape,'d')
self.omega_ebqe = numpy.zeros(cebqe[('u',0)].shape,'d')
            self.setParams(cebqe['x'],
                           self.rwork_psk_ebqe,
                           self.Ks_ebqe,
                           self.omega_ebqe)
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
cip['psi_n'] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',0)] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',1)] = numpy.zeros(cip[('u',0)].shape,'d')
if self.hasMaterialTypes:
self.ip_shape = cip[('u',0)].shape
#should be element based so can use elementMaterialTypes
self.materialTypes_ip = self.elementMaterialTypes
elif self.setParams is not None:
self.rwork_psk_ip = numpy.zeros(cip[('u',0)].shape+(self.len_rwork_psk,),'d')
self.Ks_ip = numpy.zeros(cip[('u',0)].shape,'d')
self.omega_ip = numpy.zeros(cip[('u',0)].shape,'d')
            self.setParams(cip['x'],
                           self.rwork_psk_ip,
                           self.Ks_ip,
                           self.omega_ip)
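
# Illustrative usage sketch (values are placeholders, not validated parameters):
# for the default 'VGM' psk model, setMaterialTypes packs one row per material
# type into rwork_psk as [Sw_min, Sw_max, vg_alpha, vg_m].
def _example_setup_two_material_vgm(coefficients):
    coefficients.setMaterialTypes(Ksw_types=[1.0e-5, 5.0e-6],
                                  omega_types=[0.4, 0.35],
                                  Sw_max_types=[1.0, 1.0],
                                  Sw_min_types=[0.05, 0.1],
                                  vg_alpha_types=[5.0, 3.5],
                                  vg_m_types=[0.75, 0.65])
    return coefficients.rwork_psk  # shape (2, 4) for the 'VGM'/'VGB' models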
#primitive fully coupled formulation
class TwophaseDarcy_fc(TwophaseDarcyFlow_base):
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_fc_sd_het_matType
def __init__(self,
                 g=[9.8],
                 rhon=1.0,
                 rhow=1.0,
mun = 1.0,
muw = 1.0,
Ksw=1.0,
psk_model='VGM',
vg_alpha = 5.0,
vg_m = 0.75,
bc_pd = old_div(1.0,5.47),
bc_lambda = 0.5,
omega = 1.0,
Sw_max = 1.0,
Sw_min = 0.0,
#mwf come up with a more succinct way of handling density variation
density_w_parameters = None, #{'model',...}
density_n_parameters = None,
diagonalHet = False,
sparseDiffusionTensors={},
sd = True):
self.nc=2
variableNames=['s_w','psi_w']
#just assume mass is a function of psi w now?
mass = {0:{0:'linear',1:'nonlinear'},
1:{0:'nonlinear',1:'nonlinear'}}
advection = {0:{0:'nonlinear'},
1:{0:'nonlinear'}}
hamiltonian={}
potential = {0:{1:'nonlinear',
0:'nonlinear'},
1:{0:'nonlinear',
1:'nonlinear'}}
diffusion = {0:{0:{0:'nonlinear',
1:'nonlinear'}},
1:{1:{0:'nonlinear',
1:'nonlinear'}}}
reaction = {0:{0:'linear'},
1:{1:'linear'}}
#now account for density parameterization
self.density_w_parameters = density_w_parameters
self.density_n_parameters = density_n_parameters
density_w_model = 'Constant'
density_n_model = 'Constant'
if self.density_w_parameters is not None:
density_w_model = self.density_w_parameters['model']
if self.density_n_parameters is not None:
density_n_model = self.density_n_parameters['model']
#for handling sparse diffusion options
assert not diagonalHet
self.nd = len(g) #need to check
assert len(sparseDiffusionTensors) > 0
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = sd)
TwophaseDarcyFlow_base.__init__(self,
g,
rhon,
rhow,
mun,
muw,
Ksw,
psk_model,
vg_alpha,
vg_m,
bc_pd,
bc_lambda,
omega,
Sw_max,
Sw_min,
density_w_model,
density_n_model)
#set up density relationships
for params,rwork in zip([self.density_w_parameters,self.density_n_parameters],
['rwork_density_w','rwork_density_n']):
if params is not None:
if params['model'] == 'Exponential':
setattr(self,rwork,numpy.array([old_div(params['rho_0'],params['rho_0']),#normalize by phase density
params['psi_0'],
params['beta']],dtype='d'))
elif params['model'] == 'IdealGas':
setattr(self,rwork,numpy.array([params['T'],
old_div(params['W'],params['rho_0']),#normalize by phase density
params['R'],
params['headToPressure'],
old_div(params['rho_0'],params['rho_0']),#normalize by phase density
params['psi_0']],dtype='d'))
else:
assert False, 'TwophaseDarcy_fc density params= %s not found ' % params
#
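    # Illustrative shapes of the density parameter dictionaries consumed above;
    # the keys follow the rwork packing in __init__ and the numbers are
    # placeholders only:
    #   density_w_parameters = {'model': 'Exponential',
    #                           'rho_0': 998.2, 'psi_0': 0.0, 'beta': 4.5e-10}
    #   density_n_parameters = {'model': 'IdealGas',
    #                           'T': 288.0, 'W': 0.029, 'R': 8.314,
    #                           'headToPressure': 9789.0,
    #                           'rho_0': 1.205, 'psi_0': 0.0}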
def evaluate(self,t,c):
assert self.hasMaterialTypes
if c[('u',0)].shape == self.q_shape:
materialTypes = self.materialTypes_q
elif c[('u',0)].shape == self.ebqe_shape:
materialTypes = self.materialTypes_ebqe
elif c[('u',0)].shape == self.ip_shape:
materialTypes = self.materialTypes_ip
elif c[('u',0)].shape == self.ebq_shape:
materialTypes = self.materialTypes_ebq
else:
assert False, "no materialType found to match c[('u',0)].shape= %s " % c[('u',0)].shape
self.twophaseDarcy_fc_sd_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.rwork_psk,
self.rwork_density_w,
self.rwork_density_n,
self.g,
c['x'],
c[('u',0)],
c[('u',1)],
c[('m',0)],
c[('dm',0,0)],
c[('dm',0,1)],
c[('m',1)],
c[('dm',1,0)],
c[('dm',1,1)],
c['psi_n'],
c[('dpsi_n',0)],
c[('dpsi_n',1)],
c[('phi',0)],
c[('dphi',0,1)],
c[('phi',1)],
c[('dphi',1,1)],
c[('dphi',1,0)],
c[('a',0,0)],
c[('da',0,0,0)],
c[('da',0,0,1)],
c[('a',1,1)],
c[('da',1,1,0)],
c[('da',1,1,1)])
#split fractional flow formulation--pressure equation
class TwophaseDarcy_split_pressure(TwophaseDarcyFlow_base):
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_incompressible_split_sd_pressure_het_matType
def __init__(self,
                 g=[9.8],
                 rhon=1.0,
                 rhow=1.0,
mun = 1.0,
muw = 1.0,
Ksw=1.0,
psk_model='VGM',
vg_alpha = 5.0,
vg_m = 0.75,
bc_pd = old_div(1.0,5.47),
bc_lambda = 0.5,
omega = 1.0,
Sw_max = 1.0,
Sw_min = 0.0,
swConstant=0.5,
capillaryDiffusionScaling=1.0,
nModel = 1,
diagonalHet = False,
sparseDiffusionTensors={},
sd = True):
self.swConstant=swConstant
self.nc=1
variableNames=['psi_w']
#these are only nonlinear for compressible flow
mass = {0:{0:'nonlinear'}}
advection = {0:{0:'nonlinear'}}
hamiltonian={}
diffusion = {0:{0: {0:'nonlinear'}}}
potential = {0:{0: 'u'}} # if phi is nonlinear
reaction = {0:{0:'linear'}}
#for handling sparse diffusion options
assert not diagonalHet
self.nd = len(g) #need to check
assert len(sparseDiffusionTensors) > 0
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = sd)
TwophaseDarcyFlow_base.__init__(self,
g,
rhon,
rhow,
mun,
muw,
Ksw,
psk_model,
vg_alpha,
vg_m,
bc_pd,
bc_lambda,
omega,
Sw_max,
Sw_min)
self.nModel=nModel
#capillary term combined with gravity in 'f'
#so can't turn off easily
self.capillaryDiffusionScaling = capillaryDiffusionScaling
def attachModels(self,modelList):
if self.nModel is None:
            print('Warning: TwophaseDarcy_split_pressure nModel is None; returning from attachModels')
return
self.q_s_w = modelList[self.nModel].q[('u',0)]
self.ebqe_s_w = modelList[self.nModel].ebqe[('u',0)]
if ('u',0) in modelList[self.nModel].ebq:
self.ebq_s_w = modelList[self.nModel].ebq[('u',0)]
assert ('u',0) in modelList[self.nModel].phi_ip
assert self.ip_s_w.shape == modelList[self.nModel].phi_ip[('u',0)].shape
self.ip_s_w = modelList[self.nModel].phi_ip[('u',0)]
self.ip_grad_psic = None#modelList[self.nModel].phi_ip[('grad(phi)',0)]
self.q_grad_psic = modelList[self.nModel].q[('grad(phi)',0)]
self.ebqe_grad_psic = modelList[self.nModel].ebqe[('grad(phi)',0)]
if ('grad(phi)',0) in modelList[self.nModel].ebq:
self.ebq_grad_psic = modelList[self.nModel].ebq[('grad(phi)',0)]
self.q_psic = modelList[self.nModel].q[('phi',0)]
self.ebqe_psic= modelList[self.nModel].ebqe[('phi',0)]
if ('phi',0) in modelList[self.nModel].ebq:
self.ebq_psic = modelList[self.nModel].ebq[('phi',0)]
assert ('phi',0) in modelList[self.nModel].phi_ip
assert self.ip_psic.shape == modelList[self.nModel].phi_ip[('phi',0)].shape
self.ip_psic = modelList[self.nModel].phi_ip[('phi',0)]
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#set up dummy values in case we're not running the other model
self.q_s_w = numpy.zeros(cq[('u',0)].shape,'d')
self.q_s_w[:] = self.swConstant
for i in range(old_div(len(self.q_s_w.flat),2),len(self.q_s_w.flat)):
self.q_s_w.flat[i] = 1.0e-4
self.q_grad_psic = numpy.zeros(cq[('f',0)].shape,'d')
self.q_psic = numpy.zeros(cq[('u',0)].shape,'d')
cq['psi_n'] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',0)] = numpy.ones(cq[('u',0)].shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
#set up dummy values in case we're not running the other model
self.ebq_s_w = numpy.zeros(cebq[('u',0)].shape,'d')
self.ebq_s_w[:]=self.swConstant
for i in range(old_div(len(self.ebq_s_w.flat),2),len(self.ebq_s_w.flat)):
self.ebq_s_w.flat[i] = 1.0e-4
self.ebq_grad_psic = numpy.zeros(cebq[('f',0)].shape,'d')
self.ebq_psic = numpy.zeros(cebq[('u',0)].shape,'d')
if ('u',0) in cebq:
cebq['psi_n'] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',0)] = numpy.ones(cebq[('u',0)].shape,'d')
if ('u',0) in cebq_global:
cebq_global['psi_n'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',0)] = numpy.ones(cebq_global[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
#set up dummy values in case we're not running the other model
self.ebqe_s_w = numpy.zeros(cebqe[('u',0)].shape,'d')
self.ebqe_s_w[:]=self.swConstant
for i in range(old_div(len(self.ebqe_s_w.flat),2),len(self.ebqe_s_w.flat)):
self.ebqe_s_w.flat[i] = 1.0e-4
self.ebqe_grad_psic = numpy.zeros(cebqe[('f',0)].shape,'d')
self.ebqe_psic = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe['psi_n'] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',0)] = numpy.ones(cebqe[('u',0)].shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
#set up dummy values in case we're not running the other model
self.ip_s_w = numpy.zeros(cip[('u',0)].shape,'d')
self.ip_s_w[:]=self.swConstant
for i in range(old_div(len(self.ip_s_w.flat),2),len(self.ip_s_w.flat)):
self.ip_s_w.flat[i] = 1.0e-4
self.ip_grad_psic = numpy.zeros(cip[('f',0)].shape,'d')
self.ip_psic = numpy.zeros(cip[('u',0)].shape,'d')
cip['psi_n'] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',0)] = numpy.ones(cip[('u',0)].shape,'d')
def evaluate(self,t,c):
if c[('u',0)].shape == self.q_s_w.shape:
s_w = self.q_s_w
grad_psic = self.q_grad_psic
c['psi_n']= numpy.copy(self.q_psic)
c['psi_n'] += c[('u',0)]
elif c[('u',0)].shape == self.ebqe_s_w.shape:
s_w = self.ebqe_s_w
grad_psic = self.ebqe_grad_psic
c['psi_n']= numpy.copy(self.ebqe_psic)
c['psi_n'] += c[('u',0)]
elif c[('u',0)].shape == self.ip_s_w.shape:
c['psi_n']= numpy.copy(self.ip_psic)
c['psi_n'] += c[('u',0)]
s_w = self.ip_s_w
grad_psic = self.ip_grad_psic
else:
assert c[('u',0)].shape == self.ebq_s_w.shape
s_w = self.ebq_s_w
grad_psic = self.ebq_grad_psic
c['psi_n']= numpy.copy(self.ebq_psic)
c['psi_n'] += c[('u',0)]
if c[('u',0)].shape == self.q_shape:
materialTypes = self.materialTypes_q
elif c[('u',0)].shape == self.ebqe_shape:
materialTypes = self.materialTypes_ebqe
elif c[('u',0)].shape == self.ip_shape:
materialTypes = self.materialTypes_ip
elif c[('u',0)].shape == self.ebq_shape:
materialTypes = self.materialTypes_ebq
else:
assert False, "no materialType found to match c[('u',0)].shape= %s " % c[('u',0)].shape
self.twophaseDarcy_incompressible_split_sd_pressure_het_matType(self.psk_types[self.psk_model],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.rwork_psk,
self.rwork_density_w,
self.rwork_density_n,
self.g,
s_w,
grad_psic,
c[('f',0)],
c[('a',0,0)])
#split fractional flow formulation--saturation equation
class TwophaseDarcy_split_saturation(TwophaseDarcyFlow_base):
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_incompressible_split_sd_saturation_het_matType
def __init__(self,
g=[9.8],
rhon=1.0,
rhow=1.0,
mun = 1.0,
muw = 1.0,
Ksw=1.0,
psk_model='VGM',
vg_alpha = 5.0,
vg_m = 0.75,
bc_pd = old_div(1.0,5.47),
bc_lambda = 0.5,
omega = 1.0,
Sw_max = 1.0,
Sw_min = 0.0,
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
nModel = 0,
diagonalHet = False,
sparseDiffusionTensors={},
sd = True):
self.qScalarConstant=qScalarConstant
self.nc=1
variableNames=['s_w']
mass = {0:{0:'nonlinear'}}
advection = {0:{0:'nonlinear'}}
hamiltonian={}
diffusion = {0:{0:{0:'nonlinear'}}}
potential = {0:{0: 'nonlinear'}}
reaction = {0:{0:'linear'}}
#for handling sparse diffusion options
assert not diagonalHet
self.nd = len(g) #need to check
assert len(sparseDiffusionTensors) > 0
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion= sd)
TwophaseDarcyFlow_base.__init__(self,
g,
rhon,
rhow,
mun,
muw,
Ksw,
psk_model,
vg_alpha,
vg_m,
bc_pd,
bc_lambda,
omega,
Sw_max,
Sw_min)
self.nModel=nModel
#could just turn off diffusion to test hyperbolic problem but
#capillary diffusion combined with gravity in pressure 'f' so use
#scaling factor in evals instead
self.capillaryDiffusionScaling = capillaryDiffusionScaling
def attachModels(self,modelList):
if self.nModel is None:
            print('Warning: TwophaseDarcy_split_saturation nModel is None; returning from attachModels')
return
self.flowModel = modelList[self.nModel]
self.q_q_t = modelList[self.nModel].q[('velocity',0)]
self.ebqe_q_t = modelList[self.nModel].ebqe[('velocity',0)]
if ('velocity',0) in modelList[self.nModel].ebq:
self.ebq_q_t = modelList[self.nModel].ebq[('velocity',0)]
#do we really need other model values for q_t in potential calculation?
assert self.ip_psiw.shape == modelList[self.nModel].phi_ip[('u',0)].shape
self.ip_psiw = modelList[self.nModel].phi_ip[('u',0)]
self.q_psiw = modelList[self.nModel].q[('u',0)]
self.ebqe_psiw = modelList[self.nModel].ebqe[('u',0)]
if ('u',0) in modelList[self.nModel].ebq:
self.ebq_psiw = modelList[self.nModel].ebq[('u',0)]
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#set up dummy values in case we're not running the other model
self.q_q_t = numpy.zeros(cq[('f',0)].shape,'d')
self.q_q_t[:] = self.qScalarConstant
self.q_psiw = numpy.ones(cq[('u',0)].shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
#set up dummy values in case we're not running the other model
self.ebq_q_t = numpy.zeros(cebq[('f',0)].shape,'d')
self.ebq_q_t[:] = self.qScalarConstant
self.ebq_psiw = numpy.ones(cebq[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
#set up dummy values in case we're not running the other model
self.ebqe_q_t = numpy.zeros(cebqe[('f',0)].shape,'d')
self.ebqe_q_t[:] = self.qScalarConstant
self.ebqe_psiw = numpy.ones(cebqe[('u',0)].shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
#set up dummy values in case we're not running the other model
self.ip_q_t = numpy.zeros(cip[('f',0)].shape,'d')
self.ip_q_t[:] = self.qScalarConstant
self.ip_psiw = numpy.ones(cip[('u',0)].shape,'d')
def evaluate(self,t,c):
if c[('f',0)].shape == self.q_q_t.shape:
q_t = self.q_q_t
psiw = self.q_psiw
elif c[('f',0)].shape == self.ebqe_q_t.shape:
q_t = self.ebqe_q_t
psiw = self.ebqe_psiw
elif c[('f',0)].shape == self.ip_q_t.shape:
q_t = self.ip_q_t
psiw = self.ip_psiw
else:
assert c[('f',0)].shape == self.ebq_q_t.shape
q_t = self.ebq_q_t
psiw = self.ebq_psiw
if c[('u',0)].shape == self.q_shape:
materialTypes = self.materialTypes_q
elif c[('u',0)].shape == self.ebqe_shape:
materialTypes = self.materialTypes_ebqe
elif c[('u',0)].shape == self.ip_shape:
materialTypes = self.materialTypes_ip
elif c[('u',0)].shape == self.ebq_shape:
materialTypes = self.materialTypes_ebq
else:
assert False, "no materialType found to match c[('u',0)].shape= %s " % c[('u',0)].shape
self.twophaseDarcy_incompressible_split_sd_saturation_het_matType(self.psk_types[self.psk_model],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,self.capillaryDiffusionScaling,
self.rwork_psk,
self.rwork_density_w,
self.rwork_density_n,
self.g,
q_t,
c[('u',0)],
c[('m',0)],
c[('dm',0,0)],
c[('phi',0)],
c[('dphi',0,0)],
c[('f',0)],
c[('df',0,0)],
c[('a',0,0)],
c[('da',0,0,0)])
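
# Usage note (inferred from the attachModels methods above): in the split
# fractional-flow formulation the pressure coefficients are constructed with
# nModel pointing at the saturation model (default nModel=1) and the saturation
# coefficients with nModel pointing at the pressure model (default nModel=0).
# The pressure side then pulls s_w and the capillary potential from the
# saturation model, while the saturation side pulls the total velocity and
# water head from the pressure model when attachModels is called.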
|
|
import functools
import itertools
from django.db import models
from django.views.generic import base as views_base
from django.template import loader
from django import template as template_module
from django import http
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.utils.translation import ugettext as _
_namespace_registry = {}
class TemplatePrefixMeta(type):
def __new__(meta, classname, bases, attrs):
if attrs.get("app_name") and not attrs.get("template_prefix"):
attrs["template_prefix"] = attrs["app_name"]
return type.__new__(meta, classname, bases, attrs)
class HasUrls(object):
@property
def urls(self):
patterns = self._get_urls()
return patterns, self.app_name, self.namespace
def _get_urls(self):
raise NotImplementedError
class SubApp(HasUrls):
@property
def urls(self):
return self._get_urls()
def __init__(self, app):
self.app = app
class App(HasUrls):
__metaclass__ = TemplatePrefixMeta
from django.core import urlresolvers
app_name = None
extra_context = {}
app_dict = {}
template_prefix = None
def __init__(self, namespace=None, app_name=None,
app_namespace=None, template_prefix=None,
parent=None):
if self.app_name is None and app_name is None:
raise ValueError("Must define app_name in either "
"class scope or in instantiation")
if app_name is not None:
self.app_name = app_name
if template_prefix:
self.template_prefix = template_prefix
self.namespace = namespace or self.app_name
self.app_namespace = app_namespace or self.namespace
self.__class__.app_dict.setdefault(self.app_name, []).append(self)
_namespace_registry[self.namespace] = self
self.parent = parent
@staticmethod
    def get_by_namespace(namespace):
return _namespace_registry.get(namespace)
def get_template_names(self, template_select,
denominator=None, extensions=["html"]):
some_prefixes = [getattr(p, "template_prefix", None)
for p
in self.__class__.__mro__]
prefixes = [self.template_prefix] + list(filter(bool, some_prefixes))
candidates = []
for (prefix, template, ext
) in itertools.product(prefixes, template_select, extensions):
if denominator is not None:
candidates.append(
"%(app_name)s/custom/%(denominator)s/%(template)s.%(ext)s" % {
"app_name": prefix,
"denominator": denominator,
"template": template,
"ext": ext}
)
candidates.append(
"%(app_name)s/%(template)s.%(ext)s" % {
"app_name": prefix,
"template": template,
"ext": ext},
)
for (template, ext
) in itertools.product(template_select, extensions):
candidates.append(
"%(template)s.%(ext)s" % {
"template": template,
"ext": ext,
}
)
return candidates
def get_response(self, request, context={}, template=None,
denominator=None, extend_template=None,
template_select=None):
if extend_template is None:
extend_templates = [getattr(self, "base_template", "base")]
else:
extend_templates = [extend_template]
if (request.is_ajax()
or (settings.DEBUG and "_lyra_ajax" in request.GET)):
extend_templates.insert(0, "ajax_base")
if template and template_select is None:
template_select = [template]
elif template_select is None:
raise ValueError("provide one of template or "
"template_select, not both or neither")
tpl = loader.select_template(
self.get_template_names(template_select, denominator))
base_context = dict(
self.extra_context,
**{
"base": loader.select_template(
self.get_template_names(extend_templates, denominator))}
)
context = dict(base_context, **context)
ctx = template_module.RequestContext(request, context)
ctx.current_app = self.app_namespace
return http.HttpResponse(tpl.render(ctx))
def reverse(self, view_name, kwargs={}):
return self.urlresolvers.reverse(
"%s:%s" % (self.namespace, view_name),
args=(),
kwargs=kwargs,
current_app=self.namespace)
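
# Illustrative sketch (hypothetical app): a minimal concrete App. With
# app_name "library" the metaclass copies the name into template_prefix, so
# get_template_names(["index"]) yields prefixed candidates such as
# "library/index.html" before the bare "index.html" fallback, and passing
# denominator="staff" adds "library/custom/staff/index.html" style candidates
# in front of the prefixed ones.
class _ExampleLibraryApp(App):
    app_name = "library"

    def _get_urls(self):
        return []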
# --- Views ---
def requires_app(f):
@functools.wraps(f)
def requirer(self, *args, **kwargs):
if self.app is None:
raise AttributeError("app aware views need to know"
"the app")
return f(self, *args, **kwargs)
return requirer
class AppAwareMixin(object):
app = None
class AppAwareTemplate(views_base.TemplateResponseMixin):
app = None
base_template_names = ["base"]
@requires_app
def render_to_response(self, *args, **kwargs):
kwargs["current_app"] = self.app.namespace
return super(AppAwareTemplate, self).render_to_response(*args, **kwargs)
def get_context_data(self, **kwargs):
data = super(AppAwareTemplate, self).get_context_data(**kwargs)
base_template_names = list(self.base_template_names)
if (self.request.is_ajax()
or (settings.DEBUG and "_lyra_ajax" in self.request.GET)):
base_template_names.insert(0, "ajax_base")
data.update({
"base": loader.select_template(self.app.get_template_names(
base_template_names,
denominator=self.get_denominator())),
})
return data
@requires_app
def get_template_names(self):
return self.app.get_template_names(
super(AppAwareTemplate, self).get_template_names(),
denominator=self.get_denominator())
def get_denominator(self):
return None
class AppAwareQuerysetMixin(object):
@requires_app
def get_queryset(self):
return self.app.queryset
    def get_slug_field(self):
return "slug"
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get('pk', None)
slug = self.kwargs.get('slug', None)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError(u"Generic detail view %s must be called with "
u"either an object pk or a slug."
% self.__class__.__name__)
try:
obj = queryset.get()
except ObjectDoesNotExist:
raise Http404(_(u"No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
class AppAwareSecurityMixin(object):
permissions = ["view"]
@requires_app
def dispatch(self, request, *args, **kwargs):
is_forbidden = self.app.check_forbidden(
request,
self.permissions)
if is_forbidden:
return is_forbidden
return super(AppAwareSecurityMixin, self).dispatch(request, *args, **kwargs)
class AppAwareObjectSecurityMixin(AppAwareQuerysetMixin):
permissions = ["view"]
@requires_app
def dispatch(self, request, *args, **kwargs):
self.kwargs = kwargs
obj = self.get_object()
is_forbidden = self.app.check_forbidden(
request,
self.permissions,
obj)
if is_forbidden:
return is_forbidden
return super(AppAwareObjectSecurityMixin, self).dispatch(
request, *args, **kwargs)
# --- Models ---
class QuerySetManager(models.Manager):
use_for_related_fields = True
def __init__(self, qs_class=models.query.QuerySet):
self.queryset_class = qs_class
super(QuerySetManager, self).__init__()
def get_query_set(self):
return self.queryset_class(self.model)
def __getattr__(self, attr, *args):
try:
return getattr(self.__class__, attr, *args)
except AttributeError:
return getattr(self.get_query_set(), attr, *args)
class QuerySet(models.query.QuerySet):
"""Base QuerySet class for adding custom methods that are made
available on both the manager and subsequent cloned QuerySets"""
@classmethod
def as_manager(cls, ManagerClass=QuerySetManager):
return ManagerClass(cls)
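
# Illustrative sketch (hypothetical model): as_manager() exposes the custom
# QuerySet's methods on both the manager and any chained querysets, e.g.:
#   class Book(models.Model):
#       title = models.CharField(max_length=100)
#       objects = QuerySet.as_manager()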
choices_placeholder = ("1", "1")
choices = lambda: [choices_placeholder]
def list_get_name(L, key):
for i in L:
if i.name == key:
return i
raise IndexError
def clear_choices(model, field_name="namespace"):
choices = list_get_name(model._meta.fields, field_name).choices
try:
del choices[choices.index(choices_placeholder)]
except ValueError:
pass
ALL_NAMESPACES = {}
def make_registerer(model, field_name="namespace"):
def register_app(section_tuple, app_name):
L = list_get_name(model._meta.fields, field_name).choices
(section_namespace, section_desc) = section_tuple
if section_namespace not in [i for i,d in L]:
L.append(section_tuple)
ALL_NAMESPACES.setdefault(app_name, {})[section_namespace] = section_desc
clear_choices(model, field_name)
return register_app
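
# Illustrative sketch (hypothetical model and section): the registerer appends
# a (namespace, description) choice to the given model field and records it in
# ALL_NAMESPACES, e.g.:
#   register = make_registerer(SomeModel, field_name="namespace")
#   register(("reports", "Reports section"), app_name="library")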
|
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from diskspace import DiskSpaceCollector
##########################################################################
def run_only_if_major_is_available(func):
try:
import os
os.major
major = True
except AttributeError:
major = None
pred = lambda: major is not None
return run_only(func, pred)
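
# os.major/os.minor are only provided on platforms that expose raw device
# numbers (they are absent on e.g. Windows), so the decorator above skips
# these tests where they are unavailable.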
class TestDiskSpaceCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DiskSpaceCollector', {
'interval': 10,
'byte_unit': ['gigabyte'],
'exclude_filters': [
'^/export/home',
]
})
self.collector = DiskSpaceCollector(config, None)
def test_import(self):
self.assertTrue(DiskSpaceCollector)
@run_only_if_major_is_available
@patch('os.access', Mock(return_value=True))
def test_get_file_systems(self):
result = None
os_stat_mock = patch('os.stat')
os_major_mock = patch('os.major')
os_minor_mock = patch('os.minor')
os_realpath_mock = patch('os.path.realpath')
open_mock = patch('__builtin__.open',
Mock(return_value=self.getFixture('proc_mounts')))
stat_mock = os_stat_mock.start()
stat_mock.return_value.st_dev = 42
major_mock = os_major_mock.start()
major_mock.return_value = 9
minor_mock = os_minor_mock.start()
minor_mock.return_value = 0
realpath_mock = os_realpath_mock.start()
realpath_mock.return_value = '/dev/sda1'
omock = open_mock.start()
result = self.collector.get_file_systems()
os_stat_mock.stop()
os_major_mock.stop()
os_minor_mock.stop()
os_realpath_mock.stop()
open_mock.stop()
stat_mock.assert_called_once_with('/')
major_mock.assert_called_once_with(42)
minor_mock.assert_called_once_with(42)
realpath_mock.assert_called_once_with(
'/dev/disk/by-uuid/81969733-a724-4651-9cf5-64970f86daba')
self.assertEqual(result, {
(9, 0): {
'device':
'/dev/sda1',
'fs_type': 'ext3',
'mount_point': '/'}
})
omock.assert_called_once_with('/proc/mounts')
return result
@run_only_if_major_is_available
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
statvfs_mock = Mock()
statvfs_mock.f_bsize = 1048576
statvfs_mock.f_frsize = 4096
statvfs_mock.f_blocks = 360540255
statvfs_mock.f_bfree = 285953527
statvfs_mock.f_bavail = 267639130
statvfs_mock.f_files = 91578368
statvfs_mock.f_ffree = 91229495
statvfs_mock.f_favail = 91229495
statvfs_mock.f_flag = 4096
statvfs_mock.f_namemax = 255
os_stat_mock = patch('os.stat')
os_major_mock = patch('os.major', Mock(return_value=9))
os_minor_mock = patch('os.minor', Mock(return_value=0))
os_path_isdir_mock = patch('os.path.isdir', Mock(return_value=False))
open_mock = patch('__builtin__.open',
Mock(return_value=self.getFixture('proc_mounts')))
os_statvfs_mock = patch('os.statvfs', Mock(return_value=statvfs_mock))
os_stat_mock.start()
os_major_mock.start()
os_minor_mock.start()
os_path_isdir_mock.start()
open_mock.start()
os_statvfs_mock.start()
self.collector.collect()
os_stat_mock.stop()
os_major_mock.stop()
os_minor_mock.stop()
os_path_isdir_mock.stop()
open_mock.stop()
os_statvfs_mock.stop()
metrics = {
'root.gigabyte_used': (284.525, 2),
'root.gigabyte_free': (1090.826, 2),
'root.gigabyte_avail': (1020.962, 2),
'root.inodes_used': 348873,
'root.inodes_free': 91229495,
'root.inodes_avail': 91229495,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_major_is_available
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_tmpfs(self, publish_mock):
config = get_collector_config('DiskSpaceCollector', {
'interval': 10,
'byte_unit': ['gigabyte'],
'exclude_filters': [],
'filesystems': 'tmpfs'
})
self.collector = DiskSpaceCollector(config, None)
statvfs_mock = Mock()
statvfs_mock.f_bsize = 4096
statvfs_mock.f_frsize = 4096
statvfs_mock.f_blocks = 360540255
statvfs_mock.f_bfree = 285953527
statvfs_mock.f_bavail = 267639130
statvfs_mock.f_files = 91578368
statvfs_mock.f_ffree = 91229495
statvfs_mock.f_favail = 91229495
statvfs_mock.f_flag = 4096
statvfs_mock.f_namemax = 255
os_stat_mock = patch('os.stat')
os_major_mock = patch('os.major', Mock(return_value=4))
os_minor_mock = patch('os.minor', Mock(return_value=0))
os_path_isdir_mock = patch('os.path.isdir', Mock(return_value=False))
open_mock = patch('__builtin__.open',
Mock(return_value=self.getFixture('proc_mounts')))
os_statvfs_mock = patch('os.statvfs', Mock(return_value=statvfs_mock))
os_stat_mock.start()
os_major_mock.start()
os_minor_mock.start()
os_path_isdir_mock.start()
open_mock.start()
os_statvfs_mock.start()
self.collector.collect()
os_stat_mock.stop()
os_major_mock.stop()
os_minor_mock.stop()
os_path_isdir_mock.stop()
open_mock.stop()
os_statvfs_mock.stop()
metrics = {
'tmp.gigabyte_used': (284.525, 2),
'tmp.gigabyte_free': (1090.826, 2),
'tmp.gigabyte_avail': (1020.962, 2),
'tmp.inodes_used': 348873,
'tmp.inodes_free': 91229495,
'tmp.inodes_avail': 91229495,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
|
import warnings
from collections import deque
from functools import total_ordering
from django.db.migrations.state import ProjectState
from django.utils.datastructures import OrderedSet
from .exceptions import CircularDependencyError, NodeNotFoundError
RECURSION_DEPTH_WARNING = (
"Maximum recursion depth exceeded while generating migration graph, "
"falling back to iterative approach. If you're experiencing performance issues, "
"consider squashing migrations as described at "
"https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations."
)
@total_ordering
class Node:
"""
A single node in the migration graph. Contains direct links to adjacent
nodes in either direction.
"""
def __init__(self, key):
self.key = key
self.children = set()
self.parents = set()
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
def __hash__(self):
return hash(self.key)
def __getitem__(self, item):
return self.key[item]
def __str__(self):
return str(self.key)
def __repr__(self):
return '<Node: (%r, %r)>' % self.key
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
# Use manual caching, @cached_property effectively doubles the
# recursion depth for each recursion.
def ancestors(self):
# Use self.key instead of self to speed up the frequent hashing
# when constructing an OrderedSet.
if '_ancestors' not in self.__dict__:
ancestors = deque([self.key])
for parent in sorted(self.parents):
ancestors.extendleft(reversed(parent.ancestors()))
self.__dict__['_ancestors'] = list(OrderedSet(ancestors))
return self.__dict__['_ancestors']
# Use manual caching, @cached_property effectively doubles the
# recursion depth for each recursion.
def descendants(self):
# Use self.key instead of self to speed up the frequent hashing
# when constructing an OrderedSet.
if '_descendants' not in self.__dict__:
descendants = deque([self.key])
for child in sorted(self.children):
descendants.extendleft(reversed(child.descendants()))
self.__dict__['_descendants'] = list(OrderedSet(descendants))
return self.__dict__['_descendants']
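
# Illustrative sketch (hypothetical migration keys): ancestors() returns the
# node's dependencies first and the node itself last, while descendants()
# returns its dependents first and the node itself last.
def _example_node_ordering():
    first = Node(("app", "0001_initial"))
    second = Node(("app", "0002_change"))
    first.add_child(second)
    second.add_parent(first)
    assert second.ancestors() == [("app", "0001_initial"), ("app", "0002_change")]
    assert first.descendants() == [("app", "0002_change"), ("app", "0001_initial")]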
class DummyNode(Node):
def __init__(self, key, origin, error_message):
super().__init__(key)
self.origin = origin
self.error_message = error_message
def __repr__(self):
return '<DummyNode: (%r, %r)>' % self.key
def promote(self):
"""
Transition dummy to a normal node and clean off excess attribs.
Creating a Node object from scratch would be too much of a
        hassle as many dependencies would need to be remapped.
"""
del self.origin
del self.error_message
self.__class__ = Node
def raise_error(self):
raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
class MigrationGraph:
"""
Represents the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
self.cached = False
def add_node(self, key, migration):
# If the key already exists, then it must be a dummy node.
dummy_node = self.node_map.get(key)
if dummy_node:
# Promote DummyNode to Node.
dummy_node.promote()
else:
node = Node(key)
self.node_map[key] = node
self.nodes[key] = migration
self.clear_cache()
def add_dummy_node(self, key, origin, error_message):
node = DummyNode(key, origin, error_message)
self.node_map[key] = node
self.nodes[key] = None
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist.
If `skip_validation` is set, validate_consistency should be called afterwards.
"""
if child not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" child node %r" % (migration, child)
)
self.add_dummy_node(child, migration, error_message)
if parent not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" parent node %r" % (migration, parent)
)
self.add_dummy_node(parent, migration, error_message)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
if not skip_validation:
self.validate_consistency()
self.clear_cache()
def remove_replaced_nodes(self, replacement, replaced):
"""
Removes each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
# Cast list of replaced keys to set to speed up lookup later.
replaced = set(replaced)
try:
replacement_node = self.node_map[replacement]
except KeyError as err:
raise NodeNotFoundError(
"Unable to find replacement node %r. It was either never added"
" to the migration graph, or has been removed." % (replacement, ),
replacement
) from err
for replaced_key in replaced:
self.nodes.pop(replaced_key, None)
replaced_node = self.node_map.pop(replaced_key, None)
if replaced_node:
for child in replaced_node.children:
child.parents.remove(replaced_node)
# We don't want to create dependencies between the replaced
# node and the replacement node as this would lead to
# self-referencing on the replacement node at a later iteration.
if child.key not in replaced:
replacement_node.add_child(child)
child.add_parent(replacement_node)
for parent in replaced_node.parents:
parent.children.remove(replaced_node)
# Again, to avoid self-referencing.
if parent.key not in replaced:
replacement_node.add_parent(parent)
parent.add_child(replacement_node)
self.clear_cache()
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Removes the
replacement node `replacement` and remaps its child nodes to
`replaced` - the list of nodes it would have replaced. Its parent
nodes are not remapped as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
replacement_node = self.node_map.pop(replacement)
except KeyError as err:
raise NodeNotFoundError(
"Unable to remove replacement node %r. It was either never added"
" to the migration graph, or has been removed already." % (replacement, ),
replacement
) from err
replaced_nodes = set()
replaced_nodes_parents = set()
for key in replaced:
replaced_node = self.node_map.get(key)
if replaced_node:
replaced_nodes.add(replaced_node)
replaced_nodes_parents |= replaced_node.parents
# We're only interested in the latest replaced node, so filter out
# replaced nodes that are parents of other replaced nodes.
replaced_nodes -= replaced_nodes_parents
for child in replacement_node.children:
child.parents.remove(replacement_node)
for replaced_node in replaced_nodes:
replaced_node.add_child(child)
child.add_parent(replaced_node)
for parent in replacement_node.parents:
parent.children.remove(replacement_node)
# NOTE: There is no need to remap parent dependencies as we can
# assume the replaced nodes already have the correct ancestry.
self.clear_cache()
def validate_consistency(self):
"""
Ensure there are no dummy nodes remaining in the graph.
"""
[n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
def clear_cache(self):
if self.cached:
for node in self.nodes:
self.node_map[node].__dict__.pop('_ancestors', None)
self.node_map[node].__dict__.pop('_descendants', None)
self.cached = False
def forwards_plan(self, target):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
# Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic
self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents))
self.cached = True
node = self.node_map[target]
try:
return node.ancestors()
except RuntimeError:
# fallback to iterative dfs
warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
return self.iterative_dfs(node)
def backwards_plan(self, target):
"""
Given a node, returns a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
# Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic
self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children))
self.cached = True
node = self.node_map[target]
try:
return node.descendants()
except RuntimeError:
# fallback to iterative dfs
warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
return self.iterative_dfs(node, forwards=False)
def iterative_dfs(self, start, forwards=True):
"""
Iterative depth first search, for finding dependencies.
"""
visited = deque()
visited.append(start)
if forwards:
stack = deque(sorted(start.parents))
else:
stack = deque(sorted(start.children))
while stack:
node = stack.popleft()
visited.appendleft(node)
if forwards:
children = sorted(node.parents, reverse=True)
else:
children = sorted(node.children, reverse=True)
# reverse sorting is needed because prepending using deque.extendleft
# also effectively reverses values
stack.extendleft(children)
return list(OrderedSet(visited))
def root_nodes(self, app=None):
"""
Returns all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self, start, get_children):
# Algo from GvR:
# http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in get_children(top):
if node in stack:
cycle = stack[stack.index(node):]
raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if len(nodes) == 0:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan:
if not at_end and migration in nodes:
continue
plan.append(migration)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
def __contains__(self, node):
return node in self.nodes
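
# Illustrative sketch (hypothetical app and migration names): building a tiny
# two-node graph and asking for the plan that would apply the latest migration.
def _example_forwards_plan():
    graph = MigrationGraph()
    graph.add_node(("app", "0001_initial"), None)
    graph.add_node(("app", "0002_change"), None)
    graph.add_dependency("app.0002_change", ("app", "0002_change"),
                         ("app", "0001_initial"))
    # Dependencies come first, the requested node last.
    return graph.forwards_plan(("app", "0002_change"))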
|
|
#!/usr/bin/env python
from __future__ import print_function
from setuptools import setup
from setuptools import Distribution
from setuptools.command.sdist import sdist
from setuptools.extension import Extension
import platform
import re
import sys
import os
SKIP_CYTHON_FILE = '__dont_use_cython__.txt'
if os.path.exists(SKIP_CYTHON_FILE):
print("In distributed package, building from C files...", file=sys.stderr)
SOURCE_EXT = 'c'
else:
try:
from Cython.Build import cythonize
print("Building from Cython files...", file=sys.stderr)
SOURCE_EXT = 'pyx'
except ImportError:
print("Cython not found, building from C files...",
file=sys.stderr)
SOURCE_EXT = 'c'
get_output = None
try:
import commands
get_output = commands.getoutput
except ImportError:
import subprocess
def _get_output(*args, **kwargs):
res = subprocess.check_output(*args, shell=True, **kwargs)
decoded = res.decode('utf-8')
return decoded.strip()
get_output = _get_output
# get the compile and link args
link_args = os.environ.get('GSSAPI_LINKER_ARGS', None)
compile_args = os.environ.get('GSSAPI_COMPILER_ARGS', None)
osx_has_gss_framework = False
if sys.platform == 'darwin':
mac_ver = [int(v) for v in platform.mac_ver()[0].split('.')]
osx_has_gss_framework = (mac_ver >= [10, 7, 0])
if link_args is None:
if osx_has_gss_framework:
link_args = '-framework GSS'
else:
link_args = get_output('krb5-config --libs gssapi')
if compile_args is None:
if osx_has_gss_framework:
compile_args = '-framework GSS -DOSX_HAS_GSS_FRAMEWORK'
else:
compile_args = get_output('krb5-config --cflags gssapi')
link_args = link_args.split()
compile_args = compile_args.split()
# add in the extra workarounds for different include structures
prefix = get_output('krb5-config gssapi --prefix')
gssapi_ext_h = os.path.join(prefix, 'include/gssapi/gssapi_ext.h')
if os.path.exists(gssapi_ext_h):
compile_args.append("-DHAS_GSSAPI_EXT_H")
# ensure that any specific directories are listed before any generic system
# directories inserted by setuptools
library_dirs = [arg[2:] for arg in link_args if arg.startswith('-L')]
link_args = [arg for arg in link_args if not arg.startswith('-L')]
ENABLE_SUPPORT_DETECTION = \
(os.environ.get('GSSAPI_SUPPORT_DETECT', 'true').lower() == 'true')
if ENABLE_SUPPORT_DETECTION:
import ctypes.util
main_lib = os.environ.get('GSSAPI_MAIN_LIB', None)
if main_lib is None and osx_has_gss_framework:
main_lib = ctypes.util.find_library('GSS')
elif main_lib is None:
for opt in link_args:
if opt.startswith('-lgssapi'):
main_lib = 'lib%s.so' % opt[2:]
if main_lib is None:
raise Exception("Could not find main GSSAPI shared library. Please "
"try setting GSSAPI_MAIN_LIB yourself or setting "
"ENABLE_SUPPORT_DETECTION to 'false'")
GSSAPI_LIB = ctypes.CDLL(main_lib)
# add in the flag that causes us not to compile from Cython when
# installing from an sdist
class sdist_gssapi(sdist):
def run(self):
if not self.dry_run:
with open(SKIP_CYTHON_FILE, 'w') as flag_file:
flag_file.write('COMPILE_FROM_C_ONLY')
sdist.run(self)
os.remove(SKIP_CYTHON_FILE)
DONT_CYTHONIZE_FOR = ('clean',)
class GSSAPIDistribution(Distribution, object):
def run_command(self, command):
self._last_run_command = command
Distribution.run_command(self, command)
@property
def ext_modules(self):
if SOURCE_EXT != 'pyx':
return getattr(self, '_ext_modules', None)
if getattr(self, '_ext_modules', None) is None:
return None
if getattr(self, '_last_run_command', None) in DONT_CYTHONIZE_FOR:
return self._ext_modules
if getattr(self, '_cythonized_ext_modules', None) is None:
self._cythonized_ext_modules = cythonize(self._ext_modules)
return self._cythonized_ext_modules
@ext_modules.setter
def ext_modules(self, mods):
self._cythonized_ext_modules = None
self._ext_modules = mods
@ext_modules.deleter
def ext_modules(self):
del self._ext_modules
del self._cythonized_ext_modules
# detect support
def main_file(module):
return Extension('gssapi.raw.%s' % module,
extra_link_args=link_args,
extra_compile_args=compile_args,
library_dirs=library_dirs,
sources=['gssapi/raw/%s.%s' % (module, SOURCE_EXT)])
ENUM_EXTS = []
def extension_file(module, canary):
if ENABLE_SUPPORT_DETECTION and not hasattr(GSSAPI_LIB, canary):
print('Skipping the %s extension because it '
'is not supported by your GSSAPI implementation...' % module)
return None
else:
enum_ext_path = 'gssapi/raw/_enum_extensions/ext_%s.%s' % (module,
SOURCE_EXT)
if os.path.exists(enum_ext_path):
ENUM_EXTS.append(
Extension('gssapi.raw._enum_extensions.ext_%s' % module,
extra_link_args=link_args,
extra_compile_args=compile_args,
sources=[enum_ext_path],
library_dirs=library_dirs,
include_dirs=['gssapi/raw/']))
return Extension('gssapi.raw.ext_%s' % module,
extra_link_args=link_args,
extra_compile_args=compile_args,
library_dirs=library_dirs,
sources=['gssapi/raw/ext_%s.%s' % (module,
SOURCE_EXT)])
def gssapi_modules(lst):
# filter out missing files
res = [mod for mod in lst if mod is not None]
# add in supported mech files
MECHS_SUPPORTED = os.environ.get('GSSAPI_MECHS', 'krb5').split(',')
for mech in MECHS_SUPPORTED:
res.append(Extension('gssapi.raw.mech_%s' % mech,
extra_link_args=link_args,
extra_compile_args=compile_args,
library_dirs=library_dirs,
sources=['gssapi/raw/mech_%s.%s' % (mech,
SOURCE_EXT)]))
# add in any present enum extension files
res.extend(ENUM_EXTS)
return res
long_desc = re.sub(r'\.\. role:: \w+\(code\)\s*\n\s*.+', '',
re.sub(r':(python|bash|code):', '',
re.sub(r'\.\. code-block:: \w+', '::',
open('README.txt').read())))
install_requires = [
'decorator',
'six >= 1.4.0'
]
if sys.version_info < (3, 4):
install_requires.append('enum34')
setup(
name='gssapi',
version='1.2.0',
author='The Python GSSAPI Team',
author_email='sross@redhat.com',
packages=['gssapi', 'gssapi.raw', 'gssapi.raw._enum_extensions',
'gssapi.tests'],
description='Python GSSAPI Wrapper',
long_description=long_desc,
license='LICENSE.txt',
url="https://github.com/pythongssapi/python-gssapi",
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Cython',
'Topic :: Security',
'Topic :: Software Development :: Libraries :: Python Modules'
],
distclass=GSSAPIDistribution,
cmdclass={'sdist': sdist_gssapi},
ext_modules=gssapi_modules([
main_file('misc'),
main_file('exceptions'),
main_file('creds'),
main_file('names'),
main_file('sec_contexts'),
main_file('types'),
main_file('message'),
main_file('oids'),
main_file('cython_converters'),
main_file('chan_bindings'),
extension_file('s4u', 'gss_acquire_cred_impersonate_name'),
extension_file('cred_store', 'gss_store_cred_into'),
extension_file('rfc5588', 'gss_store_cred'),
extension_file('cred_imp_exp', 'gss_import_cred'),
extension_file('dce', 'gss_wrap_iov'),
extension_file('iov_mic', 'gss_get_mic_iov'),
# see ext_rfc6680_comp_oid for more information on this split
extension_file('rfc6680', 'gss_display_name_ext'),
extension_file('rfc6680_comp_oid', 'GSS_C_NT_COMPOSITE_EXPORT'),
# see ext_password{,_add}.pyx for more information on this split
extension_file('password', 'gss_acquire_cred_with_password'),
extension_file('password_add', 'gss_add_cred_with_password'),
]),
keywords=['gssapi', 'security'],
install_requires=install_requires
)
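
# Build-time environment variables recognised above (all optional):
#   GSSAPI_LINKER_ARGS / GSSAPI_COMPILER_ARGS  override the krb5-config output
#   GSSAPI_SUPPORT_DETECT  set to 'false' to skip probing the GSSAPI library
#   GSSAPI_MAIN_LIB        the GSSAPI shared library to probe for extensions
#   GSSAPI_MECHS           comma-separated mechanism modules (default 'krb5')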
|
|
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import requests
import mock
from oslo.config import cfg
from neutron.tests import base
OK = requests.codes.ok
APIC_HOSTS = ['fake.controller.local']
APIC_PORT = 7580
APIC_USR = 'notadmin'
APIC_PWD = 'topsecret'
APIC_TENANT = 'citizen14'
APIC_NETWORK = 'network99'
APIC_NETNAME = 'net99name'
APIC_SUBNET = '10.3.2.1/24'
APIC_L3CTX = 'layer3context'
APIC_AP = 'appProfile001'
APIC_EPG = 'endPointGroup001'
APIC_CONTRACT = 'signedContract'
APIC_SUBJECT = 'testSubject'
APIC_FILTER = 'carbonFilter'
APIC_ENTRY = 'forcedEntry'
APIC_SYSTEM_ID = 'sysid'
APIC_DOMAIN = 'cumuloNimbus'
APIC_NODE_PROF = 'red'
APIC_LEAF = 'green'
APIC_LEAF_TYPE = 'range'
APIC_NODE_BLK = 'blue'
APIC_PORT_PROF = 'yellow'
APIC_PORT_SEL = 'front'
APIC_PORT_TYPE = 'range'
APIC_PORT_BLK1 = 'block01'
APIC_PORT_BLK2 = 'block02'
APIC_ACC_PORT_GRP = 'alpha'
APIC_FUNC_PROF = 'beta'
APIC_ATT_ENT_PROF = 'delta'
APIC_VLAN_NAME = 'gamma'
APIC_VLAN_MODE = 'dynamic'
APIC_VLANID_FROM = 2900
APIC_VLANID_TO = 2999
APIC_VLAN_FROM = 'vlan-%d' % APIC_VLANID_FROM
APIC_VLAN_TO = 'vlan-%d' % APIC_VLANID_TO
APIC_ROUTER = 'router_id'
APIC_EXT_SWITCH = '203'
APIC_EXT_MODULE = '1'
APIC_EXT_PORT = '34'
APIC_EXT_ENCAP = 'vlan-100'
APIC_EXT_CIDR_EXPOSED = '10.10.40.2/16'
APIC_EXT_GATEWAY_IP = '10.10.40.1'
APIC_KEY = 'key'
KEYSTONE_TOKEN = '123Token123'
APIC_UPLINK_PORTS = ['uplink_port']
SERVICE_HOST = 'host1'
SERVICE_HOST_IFACE = 'eth0'
SERVICE_HOST_MAC = 'aa:ee:ii:oo:uu:yy'
SERVICE_PEER_CHASSIS_NAME = 'leaf4'
SERVICE_PEER_CHASSIS = 'topology/pod-1/node-' + APIC_EXT_SWITCH
SERVICE_PEER_PORT_LOCAL = 'Eth%s/%s' % (APIC_EXT_MODULE, APIC_EXT_PORT)
SERVICE_PEER_PORT_DESC = ('topology/pod-1/paths-%s/pathep-[%s]' %
(APIC_EXT_SWITCH, SERVICE_PEER_PORT_LOCAL.lower()))
class ControllerMixin(object):
"""Mock the controller for APIC driver and service unit tests."""
def __init__(self):
self.response = None
def set_up_mocks(self):
# The mocked responses from the server are lists used by
# mock.side_effect, which means each call to post or get will
# return the next item in the list. This allows the test cases
# to stage a sequence of responses to method(s) under test.
self.response = {'post': [], 'get': []}
self.reset_reponses()
def reset_reponses(self, req=None):
# Clear all staged responses.
reqs = [req] if req else ['post', 'get'] # Both if none specified.
for req in reqs:
del self.response[req][:]
self.restart_responses(req)
def restart_responses(self, req):
responses = mock.MagicMock(side_effect=self.response[req])
if req == 'post':
requests.Session.post = responses
elif req == 'get':
requests.Session.get = responses
def mock_response_for_post(self, mo, **attrs):
attrs['debug_mo'] = mo # useful for debugging
self._stage_mocked_response('post', OK, mo, **attrs)
def _stage_mocked_response(self, req, mock_status, mo, **attrs):
response = mock.MagicMock()
response.status_code = mock_status
mo_attrs = [{mo: {'attributes': attrs}}] if attrs else []
response.json.return_value = {'imdata': mo_attrs}
self.response[req].append(response)
def mock_apic_manager_login_responses(self, timeout=300):
# APIC Manager tests are based on authenticated session
self.mock_response_for_post('aaaLogin', userName=APIC_USR,
token='ok', refreshTimeoutSeconds=timeout)
@contextlib.contextmanager
def fake_transaction(self, *args, **kwargs):
yield 'transaction'
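# Illustrative usage sketch (not part of the original test helpers): a test
# case that mixes in ControllerMixin could stage responses like this before
# exercising the APIC driver. The managed-object name 'fvTenant' below is a
# hypothetical example; only the methods defined above are assumed.
#
#   mixin = ControllerMixin()
#   mixin.set_up_mocks()
#   mixin.mock_apic_manager_login_responses()
#   mixin.mock_response_for_post('fvTenant', name=APIC_TENANT)
#   # Each call to requests.Session.post() now returns the next staged
#   # response in order (mock.side_effect pops them one by one).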
class ConfigMixin(object):
"""Mock the config for APIC driver and service unit tests."""
def __init__(self):
self.mocked_parser = None
def set_up_mocks(self):
# Mock the configuration file
base.BaseTestCase.config_parse()
# Configure global option apic_system_id
cfg.CONF.set_override('apic_system_id', APIC_SYSTEM_ID)
# Configure option keystone_authtoken
cfg.CONF.keystone_authtoken = KEYSTONE_TOKEN
# Configure the ML2 mechanism drivers and network types
ml2_opts = {
'mechanism_drivers': ['apic'],
'tenant_network_types': ['vlan'],
}
for opt, val in ml2_opts.items():
cfg.CONF.set_override(opt, val, 'ml2')
# Configure the ML2 type_vlan opts
ml2_type_vlan_opts = {
'vlan_ranges': ['physnet1:100:199'],
}
cfg.CONF.set_override('network_vlan_ranges',
ml2_type_vlan_opts['vlan_ranges'],
'ml2_type_vlan')
self.vlan_ranges = ml2_type_vlan_opts['vlan_ranges']
# Configure the Cisco APIC mechanism driver
apic_test_config = {
'apic_hosts': APIC_HOSTS,
'apic_username': APIC_USR,
'apic_password': APIC_PWD,
'apic_domain_name': APIC_SYSTEM_ID,
'apic_vlan_ns_name': APIC_VLAN_NAME,
'apic_vlan_range': '%d:%d' % (APIC_VLANID_FROM, APIC_VLANID_TO),
'apic_node_profile': APIC_NODE_PROF,
'apic_entity_profile': APIC_ATT_ENT_PROF,
'apic_function_profile': APIC_FUNC_PROF,
'apic_host_uplink_ports': APIC_UPLINK_PORTS
}
for opt, val in apic_test_config.items():
cfg.CONF.set_override(opt, val, 'ml2_cisco_apic')
self.apic_config = cfg.CONF.ml2_cisco_apic
# Configure switch topology
apic_switch_cfg = {
'apic_switch:101': {'ubuntu1,ubuntu2': ['3/11']},
'apic_switch:102': {'rhel01,rhel02': ['4/21'],
'rhel03': ['4/22']},
}
self.switch_dict = {
'101': {
'3/11': ['ubuntu1', 'ubuntu2'],
},
'102': {
'4/21': ['rhel01', 'rhel02'],
'4/22': ['rhel03'],
},
}
self.vpc_dict = {
'201': '202',
'202': '201',
}
self.external_network_dict = {
APIC_NETWORK + '-name': {
'switch': APIC_EXT_SWITCH,
'port': APIC_EXT_MODULE + '/' + APIC_EXT_PORT,
'encap': APIC_EXT_ENCAP,
'cidr_exposed': APIC_EXT_CIDR_EXPOSED,
'gateway_ip': APIC_EXT_GATEWAY_IP,
},
}
self.mocked_parser = mock.patch.object(
cfg, 'MultiConfigParser').start()
self.mocked_parser.return_value.read.return_value = [apic_switch_cfg]
self.mocked_parser.return_value.parsed = [apic_switch_cfg]
class FakeDbContract(object):
def __init__(self, contract_id):
self.contract_id = contract_id
|
|
## @file
# classes represent data in FDF
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## FD data in FDF
#
#
class FDClassObject:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.FdUiName = ''
self.CreateFileName = None
self.BaseAddress = None
self.BaseAddressPcd = None
self.Size = None
self.SizePcd = None
self.ErasePolarity = None
# 3-tuple list (blockSize, numBlocks, pcd)
self.BlockSizeList = []
# DefineVarDict[var] = value
self.DefineVarDict = {}
# SetVarDict[var] = value
self.SetVarDict = {}
self.RegionList = []
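# Illustrative sketch (not part of the original file): how a parser might
# populate an FDClassObject. All concrete values below are hypothetical.
#
#   FdObj = FDClassObject()
#   FdObj.FdUiName = 'EXAMPLE_FD'
#   FdObj.BaseAddress = '0xFFF00000'
#   FdObj.Size = '0x00100000'
#   FdObj.ErasePolarity = '1'
#   # BlockSizeList holds (blockSize, numBlocks, pcd) 3-tuples
#   FdObj.BlockSizeList.append(('0x10000', 0x10, None))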
## FFS data in FDF
#
#
class FfsClassObject:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.NameGuid = None
self.Fixed = False
self.CheckSum = False
self.Alignment = None
self.SectionList = []
## FILE statement data in FDF
#
#
class FileStatementClassObject (FfsClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FfsClassObject.__init__(self)
self.FvFileType = None
self.FileName = None
self.KeyStringList = []
self.FvName = None
self.FdName = None
self.DefineVarDict = {}
self.KeepReloc = None
## INF statement data in FDF
#
#
class FfsInfStatementClassObject(FfsClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FfsClassObject.__init__(self)
self.Rule = None
self.Version = None
self.Ui = None
self.InfFileName = None
self.BuildNum = ''
self.KeyStringList = []
self.KeepReloc = None
self.UseArch = None
## section data in FDF
#
#
class SectionClassObject:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.Alignment = None
## Depex expression section in FDF
#
#
class DepexSectionClassObject (SectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.DepexType = None
self.Expression = None
self.ExpressionProcessed = False
## Compress section data in FDF
#
#
class CompressSectionClassObject (SectionClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.CompType = None
self.SectionList = []
## Data section data in FDF
#
#
class DataSectionClassObject (SectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.SecType = None
self.SectFileName = None
self.SectionList = []
self.KeepReloc = True
## Rule section data in FDF
#
#
class EfiSectionClassObject (SectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.SectionType = None
self.Optional = False
self.FileType = None
self.StringData = None
self.FileName = None
self.FileExtension = None
self.BuildNum = None
self.KeepReloc = None
## FV image section data in FDF
#
#
class FvImageSectionClassObject (SectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.Fv = None
self.FvName = None
self.FvFileType = None
self.FvFileName = None
self.FvFileExtension = None
self.FvAddr = None
## GUIDed section data in FDF
#
#
class GuidSectionClassObject (SectionClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.NameGuid = None
self.SectionList = []
self.SectionType = None
self.ProcessRequired = False
self.AuthStatusValid = False
self.ExtraHeaderSize = -1
self.FvAddr = []
self.FvParentAddr = None
self.IncludeFvSection = False
## UI section data in FDF
#
#
class UiSectionClassObject (SectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.StringData = None
self.FileName = None
## Version section data in FDF
#
#
class VerSectionClassObject (SectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
self.BuildNum = None
self.StringData = None
self.FileName = None
## Rule data in FDF
#
#
class RuleClassObject :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.Arch = None
self.ModuleType = None # For Module Type
self.TemplateName = None
self.NameGuid = None
self.Fixed = False
self.Alignment = None
self.SectAlignment = None
self.CheckSum = False
self.FvFileType = None # for Ffs File Type
self.KeyStringList = []
self.KeepReloc = None
## Complex rule data in FDF
#
#
class RuleComplexFileClassObject(RuleClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleClassObject.__init__(self)
self.SectionList = []
## Simple rule data in FDF
#
#
class RuleSimpleFileClassObject(RuleClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleClassObject.__init__(self)
self.FileName = None
self.SectionType = ''
self.FileExtension = None
## File extension rule data in FDF
#
#
class RuleFileExtensionClassObject(RuleClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleClassObject.__init__(self)
self.FileExtension = None
## Capsule data in FDF
#
#
class CapsuleClassObject :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.SpecName = None
self.UiCapsuleName = None
self.CreateFile = None
self.GroupIdNumber = None
# DefineVarDict[var] = value
self.DefineVarDict = {}
# SetVarDict[var] = value
self.SetVarDict = {}
# TokensDict[var] = value
self.TokensDict = {}
self.CapsuleDataList = []
self.FmpPayloadList = []
## OptionROM data in FDF
#
#
class OptionRomClassObject:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.DriverName = None
self.FfsList = []
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class GtacoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave gtacoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop gtacoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing gtacoind/gtacoin-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: gtacoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some gtacoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(GtacoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("GTACOIND", "gtacoind"),
help="gtacoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("GTACOIND", "gtacoind"),
help="gtacoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
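# Illustrative sketch (not part of the original file): a concrete test would
# subclass GtacoinTestFramework, override run_test(), and invoke main().
# The generate() RPC below is a hypothetical node call used only for
# illustration.
#
#   class ExampleTest(GtacoinTestFramework):
#       def __init__(self):
#           super().__init__()
#           self.num_nodes = 2
#           self.setup_clean_chain = True
#
#       def run_test(self):
#           self.nodes[0].generate(1)
#           self.sync_all()
#
#   if __name__ == '__main__':
#       ExampleTest().main()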
|
|
#!/usr/bin/env python
# Corey Brune - Sep 2016
# This script performs a rewind of a vdb
# requirements
# pip install --upgrade setuptools pip docopt delphixpy.v1_8_0
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script.
"""Rewinds a vdb
Usage:
dx_rewind_vdb.py (--vdb <name> [--timestamp_type <type>] [--timestamp <timepoint_semantic>])
[--bookmark <type>]
[ --engine <identifier> --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_rewind_vdb.py -h | --help | -v | --version
Rewinds a Delphix VDB
Examples:
Rollback to latest snapshot using defaults:
dx_rewind_vdb.py --vdb testVdbUF
Rollback using a specific timestamp:
dx_rewind_vdb.py --vdb testVdbUF --timestamp_type snapshot --timestamp 2016-11-15T11:30:17.857Z
Options:
--vdb <name> Name of VDB to rewind
--type <database_type> Type of database: oracle, mssql, ase, vfiles
  --timestamp_type <type>   The type of timestamp being used for the rewind.
Acceptable Values: TIME, SNAPSHOT
[default: SNAPSHOT]
--all Run against all engines.
--timestamp <timepoint_semantic>
The Delphix semantic for the point in time on
the source to rewind the VDB.
Formats:
latest point in time or snapshot: LATEST
point in time: "YYYY-MM-DD HH24:MI:SS"
snapshot name: "@YYYY-MM-DDTHH24:MI:SS.ZZZ"
snapshot time from GUI: "YYYY-MM-DD HH24:MI"
[default: LATEST]
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_rewind_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = "v.0.2.016"
import sys
import traceback
from os.path import basename
from time import sleep
from time import time
from docopt import docopt
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import database
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web.vo import OracleRollbackParameters
from delphixpy.v1_8_0.web.vo import RollbackParameters
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_exception
from lib.DxLogging import print_info
from lib.DxTimeflow import DxTimeflow
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
def rewind_database(dlpx_obj, vdb_name, timestamp, timestamp_type="SNAPSHOT"):
"""
This function performs the rewind (rollback)
dlpx_obj: Virtualization Engine session object
vdb_name: VDB to be rewound
timestamp: Point in time to rewind the VDB
timestamp_type: The type of timestamp being used for the rewind
"""
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]
dx_timeflow_obj = DxTimeflow(dlpx_obj.server_session)
container_obj = find_obj_by_name(dlpx_obj.server_session, database, vdb_name)
# Sanity check to make sure our container object has a reference
if container_obj.reference:
try:
if container_obj.virtual is not True:
raise DlpxException(
"{} in engine {} is not a virtual object. "
"Skipping.\n".format(container_obj.name, engine_name)
)
elif container_obj.staging is True:
raise DlpxException(
"{} in engine {} is a virtual object. "
"Skipping.\n".format(container_obj.name, engine_name)
)
elif container_obj.runtime.enabled == "ENABLED":
print_info(
"\nINFO: {} Rewinding {} to {}\n".format(
engine_name, container_obj.name, timestamp
)
)
# This exception is raised if rewinding a vFiles VDB
# since AppDataContainer does not have virtual, staging or
# enabled attributes.
except AttributeError:
pass
print_debug("{}: Type: {}".format(engine_name, container_obj.type))
        # If the VDB is an Oracle type, we need to use an OracleRollbackParameters object
if str(container_obj.reference).startswith("ORACLE"):
rewind_params = OracleRollbackParameters()
else:
rewind_params = RollbackParameters()
rewind_params.timeflow_point_parameters = dx_timeflow_obj.set_timeflow_point(
container_obj, timestamp_type, timestamp
)
print_debug("{}: {}".format(engine_name, str(rewind_params)))
try:
# Rewind the VDB
database.rollback(
dlpx_obj.server_session, container_obj.reference, rewind_params
)
dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
print_info("VDB {} was rolled back.".format(container_obj.name))
except (RequestError, HttpError, JobError) as e:
print_exception(
"ERROR: {} encountered an error on {}"
" during the rewind process:\n{}".format(
engine_name, container_obj.name, e
)
)
# Don't do anything if the database is disabled
else:
print_info(
"{}: {} is not enabled. Skipping sync.".format(
engine_name, container_obj.name
)
)
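# Illustrative sketch (not part of the original script): calling
# rewind_database() directly after establishing a session. The host,
# credentials and VDB name below are hypothetical.
#
#   dx_session_obj = GetSession()
#   dx_session_obj.get_config('./dxtools.conf')
#   dx_session_obj.serversess('10.0.0.10', 'delphix_admin', 'password')
#   rewind_database(dx_session_obj, 'testVdbUF', 'LATEST', 'SNAPSHOT')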
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine, dlpx_obj):
"""
This function is where we create our main workflow.
Use the @run_async decorator to run this function asynchronously.
The @run_async decorator allows us to run against multiple Delphix Engine
simultaneously
:param engine: Dictionary of engines
:type engine: dictionary
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
"""
try:
# Setup the connection to the Delphix Engine
dlpx_obj.serversess(
engine["ip_address"], engine["username"], engine["password"]
)
except DlpxException as e:
print_exception(
"ERROR: Engine {} encountered an error while"
"rewinding {}:\n{}\n".format(engine["hostname"], arguments["--target"], e)
)
thingstodo = ["thingtodo"]
try:
with dlpx_obj.job_mode(single_thread):
while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
if len(thingstodo) > 0:
rewind_database(
dlpx_obj,
arguments["--vdb"],
arguments["--timestamp"],
arguments["--timestamp_type"],
)
thingstodo.pop()
# get all the jobs, then inspect them
i = 0
for j in dlpx_obj.jobs.keys():
job_obj = job.get(dlpx_obj.server_session, dlpx_obj.jobs[j])
print_debug(job_obj)
print_info(
"{}: Refresh of {}: {}".format(
engine["hostname"], arguments["--vdb"], job_obj.job_state
)
)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it
# from the running jobs list.
del dlpx_obj.jobs[j]
elif job_obj.job_state in "RUNNING":
# If the job is in a running state, increment the
# running job count.
i += 1
print_info("{}: {:d} jobs running.".format(engine["hostname"], i))
# If we have running jobs, pause before repeating the
# checks.
if len(dlpx_obj.jobs) > 0:
sleep(float(arguments["--poll"]))
except (DlpxException, RequestError, JobError, HttpError) as e:
print_exception("Error in dx_rewind_vdb: {}\n{}".format(engine["hostname"], e))
sys.exit(1)
def time_elapsed(time_start):
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
time_start: float containing start time of the script.
"""
return round((time() - time_start) / 60, +1)
def run_job(dlpx_obj, config_file_path):
"""
This function runs the main_workflow aynchronously against all the
servers specified
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
:param config_file_path: string containing path to configuration file.
:type config_file_path: str
"""
# Create an empty list to store threads we create.
threads = []
engine = None
# If the --all argument was given, run against every engine in dxtools.conf
if arguments["--all"]:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
# For each server in the dxtools.conf...
for delphix_engine in dlpx_obj.dlpx_engines:
engine = dlpx_obj.dlpx_engines[delphix_engine]
# Create a new thread and add it to the list.
threads.append(main_workflow(engine, dlpx_obj))
except DlpxException as e:
print_exception("Error encountered in run_job():\n{}".format(e))
sys.exit(1)
elif arguments["--all"] is False:
# Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments["--engine"]:
try:
engine = dlpx_obj.dlpx_engines[arguments["--engine"]]
print_info(
"Executing against Delphix Engine: {}\n".format(
arguments["--engine"]
)
)
except (DlpxException, RequestError, KeyError):
raise DlpxException(
"\nERROR: Delphix Engine {} cannot be "
"found in {}. Please check your value and"
" try again. Exiting.\n".format(
arguments["--engine"], config_file_path
)
)
else:
# Else search for a default engine in the dxtools.conf
for delphix_engine in dlpx_obj.dlpx_engines:
if dlpx_obj.dlpx_engines[delphix_engine]["default"] == "true":
engine = dlpx_obj.dlpx_engines[delphix_engine]
print_info(
"Executing against the default Delphix Engine "
"in the dxtools.conf: {}".format(
dlpx_obj.dlpx_engines[delphix_engine]["hostname"]
)
)
break
if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
# run the job against the engine
threads.append(main_workflow(engine, dlpx_obj))
# For each thread in the list...
for each in threads:
# join them back together so that we wait for all threads to complete
# before moving on
each.join()
def main():
# We want to be able to call on these variables anywhere in the script.
global single_thread
global debug
time_start = time()
single_thread = False
try:
dx_session_obj = GetSession()
logging_est(arguments["--logdir"])
print_debug(arguments)
config_file_path = arguments["--config"]
# Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
# This is the function that will handle processing main_workflow for
# all the servers.
run_job(dx_session_obj, config_file_path)
elapsed_minutes = time_elapsed(time_start)
print_info(
"script took {:.2f} minutes to get this far.".format(elapsed_minutes)
)
# Here we handle what we do when the unexpected happens
except SystemExit as e:
# This is what we use to handle our sys.exit(#)
sys.exit(e)
except DlpxException as e:
# We use this exception handler when an error occurs in a function call.
print_exception(
"ERROR: Please check the ERROR message below:\n" "{}".format(e.message)
)
sys.exit(2)
except HttpError as e:
# We use this exception handler when our connection to Delphix fails
print_exception(
"ERROR: Connection failed to the Delphix Engine. Please"
"check the ERROR message below:\n{}".format(e.message)
)
sys.exit(2)
except JobError as e:
# We use this exception handler when a job fails in Delphix so that we
# have actionable data
print_exception("A job failed in the Delphix Engine:\n{}".format(e.job))
elapsed_minutes = time_elapsed(time_start)
print_exception(
"{} took {:.2f} minutes to get this far".format(
basename(__file__), elapsed_minutes
)
)
sys.exit(3)
except KeyboardInterrupt:
# We use this exception handler to gracefully handle ctrl+c exits
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed(time_start)
print_info(
"{} took {:.2f} minutes to get this far".format(
basename(__file__), elapsed_minutes
)
)
except:
# Everything else gets caught here
print_exception("{}\n{}".format(sys.exc_info()[0], traceback.format_exc()))
elapsed_minutes = time_elapsed(time_start)
print_info(
"{} took {:.2f} minutes to get this far".format(
basename(__file__), elapsed_minutes
)
)
sys.exit(1)
if __name__ == "__main__":
# Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
# Feed our arguments to the main function, and off we go!
main()
|
|
from functools import partial
class HiggsException(Exception):
pass
class HiggsScopeException(HiggsException):
pass
class HiggsSyntaxException(HiggsException):
pass
class HiggsDeclarationException(HiggsSyntaxException):
pass
def find_in_scope(name, kwargs):
if name in kwargs:
return kwargs[name]
else:
try:
return getattr(global_scope, name)
except AttributeError:
raise HiggsScopeException(
u"Couldn't find name {} in any scope".format(name))
class GlobalScope(object):
importables = {}
NOT_PROVIDED = object()
def assign(self, name, value=NOT_PROVIDED, type_decl=NOT_PROVIDED, *args,
**kwargs):
interface = find_in_scope('interface', kwargs)
impl = find_in_scope('impl', kwargs)
# interface[name] = type(value)
if value is not self.NOT_PROVIDED:
impl[name] = value
interface[name] = type(value)
return
if type_decl is self.NOT_PROVIDED:
raise HiggsDeclarationException(
u"You must either provide a value for a declaration, "
u"or explicitly declare a type for it")
interface[name] = type_decl
def higgs_import(self, name):
if name not in self.importables:
raise ImportError
return self.importables[name]()
class Module(GlobalScope):
def inline(self, *args, **kwargs):
"""The code here will execute on import. It will define the interface
of the module (what names it exports, and their types)
In Higgs, this won't be a real function, it will consist of the actual
module code
"""
pass
def load(self, inline, *args, **kwargs):
"""Module initialization on import - default behavior.
By default we don't declare any new attributes here...though we could
:param inline: The code declared inline, in the module body
:param args:
:param kwargs:
:return:
"""
assign = find_in_scope('assign', kwargs)
interface = {}
impl = {}
assign = partial(assign, interface=interface, impl=impl)
inline(assign=assign)
self.interface = interface
self.impl = impl
def __init__(self, *args, **kwargs):
self.interface = None
self.impl = None
self.load(self.inline, *args, **kwargs)
class HiggsObject(object):
interface = None
impl = None
# NOTE: HiggsArgsSpec and HiggsList are defined before HiggsFunction and
# HiggsArgsList because their instances are referenced in those class bodies,
# which are evaluated at class-definition time.
class HiggsArgsSpec(HiggsObject):
    interface = {
    }
class HiggsList(HiggsObject):
    """Represents an integer indexed, 0-based ordered typed array of HiggsObjects
    The interfaces of the HOs must (at compile time) satisfy the type member
    of the list
    """
    interface = {
        '$get_item': 0,
        '$set_item': 0,
        '$length': 0,
        '$type': 0
    }
class HiggsFunction(HiggsObject):
    interface = {
        'pre': HiggsArgsSpec(),
        'post': HiggsArgsSpec(),
        'rtype': HiggsObject()
    }
    impl = None
    @classmethod
    def create_literal(cls, code=None, pre=None, post=None, rtype=None):
        new_function = cls()
        new_function.interface = {
            'pre': pre,
            'post': post,
            'rtype': rtype
        }
        new_function.impl = code
        return new_function
class HiggsInt(HiggsObject):
    interface = {
        '$add': HiggsFunction.create_literal()
    }
    @classmethod
    def create_literal(cls, value):
        # value is not used yet in this sketch; return the new instance so
        # callers get a HiggsInt back.
        new_int = HiggsInt()
        return new_int
class HiggsArgsList(HiggsObject):
    """Represents an ordered, (immutable?) sequence of HiggsObjects
    """
    interface = {
        '$positional': HiggsList(),
        '$keywords': 0
    }
class HiggsFrozenDict(HiggsObject):
"""A dictionary that after creation, its keys and values can not be changed
IDEA:
Unlike Python dicts, the keys don't have to return the same hash value,
because their individual object IDs will be used, not their hash value
-let's see how this works out... it works OK in Python, one must
simply inherit from types like sets, lists and dicts and use the
subclass thereof.... why all this nonsense? to provide a hook
for the user to implement weird behavior
"""
interface = {
# need generics :D :(( oh well.. let's work around this
'$get_item': HiggsFunction.create_literal(post=())
}
class HiggsDict(HiggsObject):
interface = {
}
class HiggsCode(HiggsObject):
interface = {
}
class WeirdModule(Module):
def inline(self, *args, **kwargs):
assign = find_in_scope('assign', kwargs)
assign('a', 3)
def increment(x):
return x + 1
assign('increment', increment)
assign('weird_name', type_decl=str)
assign('WeirdSubType')
global_scope = GlobalScope()
GlobalScope.importables[WeirdModule.__name__] = WeirdModule
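# Illustrative sketch (not part of the original): importing the example module
# through the global scope. Note that WeirdModule.inline() ends with
# assign('WeirdSubType'), which provides neither a value nor a type_decl, so
# loading it raises HiggsDeclarationException.
#
#   try:
#       mod = global_scope.higgs_import('WeirdModule')
#   except HiggsDeclarationException:
#       pass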
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
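# Illustrative examples (not part of the generated module), assuming the
# escaping above:
#   quote_attrib('a & b')     returns  "a &amp; b"   (wrapped in double quotes)
#   quote_attrib('say "hi"')  returns  'say "hi"'    (wrapped in single quotes)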
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class test1element(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('test1attribute', 'cimAnySimpleType', 0),
MemberSpec_('test1member', 'cimAnySimpleType', 0),
]
subclass = None
superclass = None
def __init__(self, test1attribute=None, test1member=None):
self.test1attribute = _cast(None, test1attribute)
self.test1member = test1member
def factory(*args_, **kwargs_):
if test1element.subclass:
return test1element.subclass(*args_, **kwargs_)
else:
return test1element(*args_, **kwargs_)
factory = staticmethod(factory)
def get_test1member(self): return self.test1member
def set_test1member(self, test1member): self.test1member = test1member
def get_test1attribute(self): return self.test1attribute
def set_test1attribute(self, test1attribute): self.test1attribute = test1attribute
def export(self, outfile, level, namespace_='', name_='test1element', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='test1element')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='test1element'):
if self.test1attribute is not None and 'test1attribute' not in already_processed:
already_processed.append('test1attribute')
outfile.write(' test1attribute=%s' % (quote_attrib(self.test1attribute), ))
def exportChildren(self, outfile, level, namespace_='', name_='test1element', fromsubclass_=False):
if self.test1member is not None:
self.test1member.export(outfile, level, namespace_, name_='test1member', )
def hasContent_(self):
if (
self.test1member is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='test1element'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.test1attribute is not None and 'test1attribute' not in already_processed:
already_processed.append('test1attribute')
showIndent(outfile, level)
outfile.write('test1attribute = %s,\n' % (self.test1attribute,))
def exportLiteralChildren(self, outfile, level, name_):
if self.test1member is not None:
showIndent(outfile, level)
outfile.write('test1member=model_.cimAnySimpleType(\n')
self.test1member.exportLiteral(outfile, level, name_='test1member')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('test1attribute', node)
if value is not None and 'test1attribute' not in already_processed:
already_processed.append('test1attribute')
self.test1attribute = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'test1member':
obj_ = cimAnySimpleType.factory()
obj_.build(child_)
self.set_test1member(obj_)
# end class test1element
class cimAnySimpleType(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('valueOf_', 'xs:anySimpleType', 0),
]
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
self.anyAttributes_ = {}
def factory(*args_, **kwargs_):
if cimAnySimpleType.subclass:
return cimAnySimpleType.subclass(*args_, **kwargs_)
else:
return cimAnySimpleType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def export(self, outfile, level, namespace_='', name_='cimAnySimpleType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimAnySimpleType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cimAnySimpleType'):
unique_counter = 0
for name, value in self.anyAttributes_.items():
xsinamespaceprefix = 'xsi'
xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
xsinamespace2 = '{%s}' % (xsinamespace1, )
if name.startswith(xsinamespace2):
name1 = name[len(xsinamespace2):]
name2 = '%s:%s' % (xsinamespaceprefix, name1, )
if name2 not in already_processed:
already_processed.append(name2)
outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
else:
mo = re_.match(Namespace_extract_pat_, name)
if mo is not None:
namespace, name = mo.group(1, 2)
if name not in already_processed:
already_processed.append(name)
if namespace == 'http://www.w3.org/XML/1998/namespace':
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
else:
unique_counter += 1
outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
else:
if name not in already_processed:
already_processed.append(name)
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
pass
def exportChildren(self, outfile, level, namespace_='', name_='cimAnySimpleType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='cimAnySimpleType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
for name, value in self.anyAttributes_.items():
showIndent(outfile, level)
outfile.write('%s = "%s",\n' % (name, value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
self.anyAttributes_ = {}
for name, value in attrs.items():
if name not in already_processed:
self.anyAttributes_[name] = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cimAnySimpleType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'test1element'
rootClass = test1element
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_=rootTag,
## namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'test1element'
rootClass = test1element
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_="test1element",
## namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'test1element'
rootClass = test1element
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('#from anysimpletype2_sup import *\n\n')
## sys.stdout.write('import anysimpletype2_sup as model_\n\n')
## sys.stdout.write('rootObj = model_.rootTag(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
## sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"cimAnySimpleType",
"test1element"
]
|
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab tw=80 colorcolumn=80:
"""
This script takes sets of results and tries
to infer correctness labels based on the results. The
inferred results are written to a file describing the
inferred mapping.
"""
import argparse
import logging
import os
import pprint
import sys
import yaml
from br_util import FinalResultType, classifyResult, validateMappingFile
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
class LoadYAMLException(Exception):
pass
def loadYAMLResultFile(filePath):
if not os.path.exists(filePath):
msg = '{} does not exist'.format(filePath)
logging.error(msg)
raise LoadYAMLException(msg)
results = None
with open(filePath, 'r') as f:
results = yaml.load(f, Loader=Loader)
return results
def LoadResultSets(filePaths):
resultSets = { } # Confusingly these are actually lists, not sets
for resultFile in filePaths:
try:
if resultFile in resultSets:
logging.error('Cannot specify result file "{}" twice'.format(
resultFile))
sys.exit(1)
logging.info('Loading {}'.format(resultFile))
resultSets[resultFile] = loadYAMLResultFile(resultFile)
assert isinstance(resultSets[resultFile], list)
except LoadYAMLException as e:
sys.exit(1)
return resultSets
def findResultFromProgramNameInResultSet(resultList, programName):
assert isinstance(resultList, list)
assert isinstance(programName, str)
for result in resultList:
assert 'program' in result
if result['program'] == programName:
return result
return None
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--trust-only-fe', default=[], action='append',
dest='ofe', help='Specify files (can specify repeatedly) where only the'
' FULLY_EXPLORED results are trusted')
parser.add_argument('--trust-only-bf', action='append', default=[],
dest='obf', help='Specify files (can specify repeatedly) where only the'
    ' BUG_FOUND results are trusted')
parser.add_argument('--trust-fe-and-bf', default=[], action='append',
dest='fe_and_bf', help='Specify files (can specify repeatedly) where both'
' FULLY_EXPLORED and BUG_FOUND results are trusted')
parser.add_argument('--write-disagreement-info', dest='disagreement_file',
default=None, help='Write information on disagreement to YAML file')
parser.add_argument("-l","--log-level",type=str, default="info",
dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('--ignore-overwrite', dest='ignore_overwrite',
default=False, action='store_true')
parser.add_argument('mapping_file', default=None,
help='output file for mapping')
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if (not pargs.ignore_overwrite) and os.path.exists(pargs.mapping_file):
logging.error('Refusing to overwrite {}'.format(pargs.mapping_file))
return 1
if (not pargs.ignore_overwrite) and pargs.disagreement_file != None:
if os.path.exists(pargs.disagreement_file):
logging.error('Refusing to overwrite {}'.format(pargs.disagreement_file))
return 1
# Load ofe, obf and fe_and_bf results
ofe = LoadResultSets(pargs.ofe)
obf = LoadResultSets(pargs.obf)
feAndBf = LoadResultSets(pargs.fe_and_bf)
# Check that the same file is not in any of the groups
toCheck = [ set(s.keys()) for s in [ ofe, obf, feAndBf] ]
for i in range(0, len(toCheck)):
for j in range(i+1, len(toCheck)):
common = toCheck[i].intersection( toCheck[j] )
if len(common) > 0:
logging.error('Found the same file(s) in multiple groups: {}'.format(
common))
return 1
# Build list of program names
programNames = set()
for category in ['ofe', 'obf', 'feAndBf']:
resultDict = locals()[category]
for resultList in resultDict.values():
assert isinstance(resultList, list)
for r in resultList:
programNames.add( r['program'])
  # Build program to { 'ofe': {}, 'obf': {}, 'feAndBf': {} } map
# The dicts {<filename>: <result>}
programToDataMap = { }
for programName in programNames:
assert not programName in programToDataMap
mapForProgram = programToDataMap[programName] = { }
for category in ['ofe', 'obf', 'feAndBf']:
mapForProgram[category] = { }
resultDict = locals()[category]
for fileName, resultList in resultDict.items():
assert isinstance(fileName, str)
assert isinstance(resultList, list)
resultForProgram = findResultFromProgramNameInResultSet(resultList,
programName)
if resultForProgram != None:
mapForProgram[category][fileName] = resultForProgram
else:
logging.warning('Could not find {} in {}'.format(
programName, fileName))
# Infer correctness labelling
fileToCorrectnessLabel = { }
programsWithDisagreement = [ ]
countInferredCorrect = 0
countInferredIncorrect = 0
for programName in programToDataMap.keys():
logging.debug('Inferring correctness of {}'.format(programName))
correctnessLabel = None # None means unknown
trustOnlyFullyExplored = programToDataMap[programName]['ofe']
trustOnlyBugFound = programToDataMap[programName]['obf']
trustFullyExploredAndBugFound = programToDataMap[programName]['feAndBf']
# See if one or more result sets believe the program to be correct (i.e.
# fully explored) Only do this for result sets we trust
if has(trustOnlyFullyExplored, FinalResultType.FULLY_EXPLORED) or \
has(trustFullyExploredAndBugFound, FinalResultType.FULLY_EXPLORED):
# Look for disagreement (i.e. bug found) from any results that we trust
if has(trustOnlyBugFound, FinalResultType.BUG_FOUND) or \
has(trustFullyExploredAndBugFound, FinalResultType.BUG_FOUND):
logging.warning('There is disagreement on the correctness of "{}".'
' Assuming unknown'.format(programName))
programsWithDisagreement.append(generatedDebuggingInfoFor(
programName, programToDataMap))
else:
logging.info('"{}" inferred to be correct'.format(programName))
correctnessLabel = True
countInferredCorrect += 1
elif has(trustOnlyBugFound, FinalResultType.BUG_FOUND) or \
has(trustFullyExploredAndBugFound, FinalResultType.BUG_FOUND):
# A bug was found from a result set we trust (and no result we trust fully
# explored the program) so we can infer that this program has a bug
logging.info('"{}" inferred to be incorrect'.format(programName))
correctnessLabel = False
countInferredIncorrect += 1
else:
logging.info(
'Could not infer correctness of "{}". Assuming unknown'.format(
programName))
fileToCorrectnessLabel[programName] = {'expected_correct': correctnessLabel}
    # Output stats
    logging.info('# of programs: {}'.format(len(programNames)))
    logging.info('# of programs inferred to be correct: {} ({:.2f}%)'.format(
        countInferredCorrect, 100*float(countInferredCorrect)/len(programNames)))
    logging.info('# of programs inferred to be incorrect: {} ({:.2f}%)'.format(
        countInferredIncorrect,
        100*float(countInferredIncorrect)/len(programNames)))
    countNotInferred = (len(programNames) - countInferredCorrect) - countInferredIncorrect
    logging.info('# of programs not inferred: {} ({:.2f}%)'.format(
        countNotInferred, 100*float(countNotInferred)/len(programNames)))
logging.info('There were {} programs where results'
' disagree'.format(len(programsWithDisagreement)))
if len(programsWithDisagreement) > 0:
logging.warning('There were {} programs where results'
' disagree'.format(len(programsWithDisagreement)))
if pargs.disagreement_file != None:
logging.info('Writing information on disagreements to {}'.format(
pargs.disagreement_file))
with open(pargs.disagreement_file, 'w') as f:
yamlText = yaml.dump(programsWithDisagreement,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
# Output mapping file
validateMappingFile(fileToCorrectnessLabel)
with open(pargs.mapping_file, 'w') as f:
f.write('# Inferred correctness mapping\n')
yamlText = yaml.dump(fileToCorrectnessLabel,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
return 0
def has(resultDict, desiredResultType):
assert isinstance(resultDict, dict)
assert isinstance(desiredResultType, FinalResultType)
for r in resultDict.values():
resultType = classifyResult(r)
if resultType == desiredResultType:
return True
return False
def generatedDebuggingInfoFor(programName, programToDataMap):
trustOnlyFullyExplored = programToDataMap[programName]['ofe']
trustOnlyBugFound = programToDataMap[programName]['obf']
trustFullyExploredAndBugFound = programToDataMap[programName]['feAndBf']
info = { 'program': programName, 'FinalResultTypes': {}, 'raw': {} }
for trustType in ['ofe', 'obf', 'feAndBf']:
info['FinalResultTypes'][trustType] = mapForFinalResultType = { }
info['raw'][trustType] = mapForRawResults = { }
for fileName, r in programToDataMap[programName][trustType].items():
mapForFinalResultType[fileName] = classifyResult(r).name
mapForRawResults[fileName] = r
return info
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
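# Hedged usage sketch (not part of the original tool): reading back the mapping
# file that main() writes. It assumes the structure built above, i.e.
# {<program name>: {'expected_correct': True|False|None}}, and the file name
# here is illustrative only.
def _example_read_mapping(path='correctness_mapping.yml'):
    with open(path) as f:
        mapping = yaml.safe_load(f)
    for program, info in mapping.items():
        # None means correctness could not be inferred for that program
        print('{}: {}'.format(program, info['expected_correct']))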
|
|
"""Tests for the dakotathon.experiment module."""
import os
from nose.tools import (
raises,
assert_true,
assert_equal,
assert_is_instance,
assert_is_none,
assert_is_not_none,
)
from dakotathon.experiment import Experiment
def setup_module():
"""Fixture called before any tests are performed."""
print("\n*** " + __name__)
global x
x = Experiment()
def teardown_module():
"""Fixture called after all tests have completed."""
pass
def test_instantiate():
"""Test whether Experiment instantiates."""
e = Experiment()
def test_get_component():
"""Test getting the component attribute."""
assert_is_none(x.component)
def test_set_component():
"""Test setting the component attribute."""
e = Experiment()
component = "hydrotrend"
e.component = component
assert_equal(e.component, component)
def test_component_sets_interface_type():
"""Test that setting component sets fork interface."""
from .test_interface_fork import Fork
component = "hydrotrend"
e = Experiment(component=component)
assert_is_instance(e.interface, Fork)
def test_component_sets_analysis_driver():
"""Test that setting component sets the analysis driver."""
component = "hydrotrend"
e = Experiment(component=component)
assert_equal(e.interface.analysis_driver, "dakota_run_component")
def test_get_plugin():
"""Test getting the plugin attribute."""
assert_is_none(x.plugin)
def test_set_plugin():
"""Test setting the plugin attribute."""
e = Experiment()
plugin = "hydrotrend"
e.plugin = plugin
assert_equal(e.plugin, plugin)
def test_plugin_sets_interface_type():
"""Test that setting plugin sets fork interface."""
from .test_interface_fork import Fork
plugin = "hydrotrend"
e = Experiment(plugin=plugin)
assert_is_instance(e.interface, Fork)
def test_plugin_sets_analysis_driver():
"""Test that setting plugin sets the analysis driver."""
plugin = "hydrotrend"
e = Experiment(plugin=plugin)
assert_equal(e.interface.analysis_driver, "dakota_run_plugin")
@raises(AttributeError)
def test_setting_component_and_plugin():
"""Test that setting component and plugin raises exception."""
component = plugin = "hydrotrend"
e = Experiment(component=component, plugin=plugin)
def test_multidim_parameter_study_uses_bounds():
"""Test that the multidim parameter study uses bounds."""
e = Experiment(method="multidim_parameter_study")
assert_is_not_none(e.variables.lower_bounds)
assert_is_not_none(e.variables.upper_bounds)
def test_get_environment():
"""Test getting the environment property."""
from .test_environment_base import EnvironmentBase
assert_is_instance(x.environment, EnvironmentBase)
def test_set_environment():
"""Test setting the environment property."""
from .test_environment_base import Concrete
e = Experiment()
inst = Concrete()
e.environment = inst
assert_equal(e.environment, inst)
@raises(TypeError)
def test_set_environment_fails_if_not_instance():
"""Test that environment fails with a non-instance input."""
e = Experiment()
answer = 42
e.environment = answer
def test_get_method():
"""Test getting the method property."""
from .test_method_base import MethodBase
assert_is_instance(x.method, MethodBase)
def test_set_method():
"""Test setting the method property."""
from .test_method_base import Concrete
e = Experiment()
inst = Concrete()
e.method = inst
assert_equal(e.method, inst)
@raises(TypeError)
def test_set_method_fails_if_not_instance():
"""Test that method fails with a non-instance input."""
e = Experiment()
answer = 42
e.method = answer
def test_get_variables():
"""Test getting the variables property."""
from .test_variables_base import VariablesBase
assert_is_instance(x.variables, VariablesBase)
def test_set_variables():
"""Test setting the variables property."""
from .test_variables_base import Concrete
e = Experiment()
inst = Concrete()
e.variables = inst
assert_equal(e.variables, inst)
@raises(TypeError)
def test_set_variables_fails_if_not_instance():
"""Test that variables fails with a non-instance input."""
e = Experiment()
answer = 42
e.variables = answer
def test_get_interface():
"""Test getting the interface property."""
from .test_interface_base import InterfaceBase
assert_is_instance(x.interface, InterfaceBase)
def test_set_interface():
"""Test setting the interface property."""
from .test_interface_base import Concrete
e = Experiment()
inst = Concrete()
e.interface = inst
assert_equal(e.interface, inst)
@raises(TypeError)
def test_set_interface_fails_if_not_instance():
"""Test that interface fails with a non-instance input."""
e = Experiment()
answer = 42
e.interface = answer
def test_get_responses():
"""Test getting the responses property."""
from .test_responses_base import ResponsesBase
assert_is_instance(x.responses, ResponsesBase)
def test_set_responses():
"""Test setting the responses property."""
from .test_responses_base import Concrete
e = Experiment()
inst = Concrete()
e.responses = inst
assert_equal(e.responses, inst)
@raises(TypeError)
def test_set_responses_fails_if_not_instance():
"""Test that responses fails with a non-instance input."""
e = Experiment()
answer = 42
e.responses = answer
def test_str_special():
"""Test type of __str__ method results."""
s = str(x)
assert_true(type(s) is str)
def test_str_length():
"""Test the default length of __str__."""
x = Experiment()
s = str(x)
n_lines = len(s.splitlines())
assert_equal(n_lines, 25)
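# Hedged sketch (not one of the original tests): combining keywords that the
# tests above exercise individually. It assumes Experiment accepts 'component'
# and 'method' together, which the suite does not explicitly verify.
def example_component_with_parameter_study():
    e = Experiment(component="hydrotrend", method="multidim_parameter_study")
    assert_equal(e.interface.analysis_driver, "dakota_run_component")
    assert_is_not_none(e.variables.lower_bounds)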
|
|
from __future__ import unicode_literals
import json
from django.conf import settings
from django.db import transaction
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import status
from onadata.apps.api.viewsets.xform_submission_api import XFormSubmissionApi
from onadata.apps.eventlog.models import FieldSightLog
from onadata.apps.fieldsight.models import Site
from onadata.apps.fsforms.models import FieldSightXF, Stage, Schedule, SubmissionOfflineSite
from onadata.apps.fsforms.serializers.FieldSightSubmissionSerializer import FieldSightSubmissionSerializer
from ..fieldsight_logger_tools import safe_create_instance
from channels import Group as ChannelGroup
# 10,000,000 bytes
DEFAULT_CONTENT_LENGTH = getattr(settings, 'DEFAULT_CONTENT_LENGTH', 10000000)
def create_instance_from_xml(request, fsid, site, fs_proj_xf, proj_id, xform):
xml_file_list = request.FILES.pop('xml_submission_file', [])
xml_file = xml_file_list[0] if len(xml_file_list) else None
media_files = request.FILES.values()
return safe_create_instance(fsid, xml_file, media_files, None, request, site, fs_proj_xf, proj_id, xform)
class FSXFormSubmissionApi(XFormSubmissionApi):
serializer_class = FieldSightSubmissionSerializer
template_name = 'fsforms/submission.xml'
def create(self, request, *args, **kwargs):
if self.request.user.is_anonymous():
self.permission_denied(self.request)
fsxfid = kwargs.get('pk', None)
siteid = kwargs.get('site_id', None)
offline_submission_site = None
if fsxfid is None or siteid is None:
return self.error_response("Site Id Or Form ID Not Given", False, request)
try:
fsxfid = int(fsxfid)
fxf = get_object_or_404(FieldSightXF, pk=kwargs.get('pk'))
if fxf.project:
# project level form hack
print("redirection project form to project url")
if siteid == '0':
siteid = None
elif Site.objects.filter(pk=siteid).exists() == False:
if len(siteid) > 12:
if FieldSightXF.objects.filter(pk=fsxfid).exists():
project_form = FieldSightXF.objects.get(pk=fsxfid)
project = project_form.project
if project:
site, created = Site.objects.get_or_create(
identifier="temporary_site",
is_active=True,
name="Temporary Site",
project_id=project.id,
is_survey=False,
)
siteid = site.id
offline_submission_site, created = SubmissionOfflineSite.objects.get_or_create(
offline_site_id=kwargs.get('site_id', None), temporary_site=site, instance=None, fieldsight_form=project_form)
else:
return self.error_response("Invalid Project in Project Form id", False, request)
else:
return self.error_response("Invalid Form id", False, request)
else:
return self.error_response("siteid Invalid", False, request)
if fsxfid is None:
return self.error_response("Fieldsight Form ID Not Given", False, request)
try:
fs_proj_xf = get_object_or_404(FieldSightXF, pk=kwargs.get('pk'))
xform = fs_proj_xf.xf
proj_id = fs_proj_xf.project.id
if siteid:
site = Site.objects.get(pk=siteid)
except Exception as e:
return self.error_response("Site Id Or Project Form ID Not Vaild", False, request)
if request.method.upper() == 'HEAD':
return Response(status=status.HTTP_204_NO_CONTENT,
headers=self.get_openrosa_headers(request),
template_name=self.template_name)
error, instance = create_instance_from_xml(request, None, siteid, fs_proj_xf.id, proj_id, xform)
if error or not instance:
return self.error_response(error, False, request)
if offline_submission_site is not None:
try:
instance.fieldsight_instance.offline_submission
except Exception as e:
offline_submission_site.instance = instance.fieldsight_instance
offline_submission_site.save()
print("new submission")
if not FieldSightLog.objects.filter(object_id=instance.id, type=16).exists():
if fs_proj_xf.is_survey:
instance.fieldsight_instance.logs.create(source=self.request.user, type=16,
title="new Project level Submission",
organization=fs_proj_xf.project.organization,
project=fs_proj_xf.project,
extra_object=fs_proj_xf.project,
extra_message="project",
content_object=instance.fieldsight_instance)
else:
site = Site.objects.get(pk=siteid)
instance.fieldsight_instance.logs.create(source=self.request.user, type=16,
title="new Site level Submission",
organization=fs_proj_xf.project.organization,
project=fs_proj_xf.project, site=site,
extra_object=site,
content_object=instance.fieldsight_instance)
context = self.get_serializer_context()
serializer = FieldSightSubmissionSerializer(instance, context=context)
return Response(serializer.data,
headers=self.get_openrosa_headers(request),
status=status.HTTP_201_CREATED,
template_name=self.template_name)
            # handle site-level form
fs_proj_xf = fxf.fsform.pk if fxf.fsform else None
proj_id = fxf.fsform.project.pk if fxf.fsform else fxf.site.project.pk
xform = fxf.xf
# site_id = fxf.site.pk if fxf.site else None
except:
return self.error_response("Site Id Or Form ID Not Vaild", False, request)
if request.method.upper() == 'HEAD':
return Response(status=status.HTTP_204_NO_CONTENT,
headers=self.get_openrosa_headers(request),
template_name=self.template_name)
error, instance = create_instance_from_xml(request, fsxfid, siteid, fs_proj_xf, proj_id, xform)
extra_message = ""
if error or not instance:
return self.error_response(error, False, request)
if fxf.is_staged:
instance.fieldsight_instance.site.update_current_progress()
else:
instance.fieldsight_instance.site.update_status()
if fxf.is_survey:
extra_message="project"
if not FieldSightLog.objects.filter(object_id=instance.id, type=16).exists():
instance.fieldsight_instance.logs.create(source=self.request.user, type=16, title="new Submission",
organization=instance.fieldsight_instance.site.project.organization,
project=instance.fieldsight_instance.site.project,
site=instance.fieldsight_instance.site,
extra_message=extra_message,
extra_object=instance.fieldsight_instance.site,
content_object=instance.fieldsight_instance)
context = self.get_serializer_context()
serializer = FieldSightSubmissionSerializer(instance, context=context)
return Response(serializer.data,
headers=self.get_openrosa_headers(request),
status=status.HTTP_201_CREATED,
template_name=self.template_name)
class ProjectFSXFormSubmissionApi(XFormSubmissionApi):
serializer_class = FieldSightSubmissionSerializer
template_name = 'fsforms/submission.xml'
def create(self, request, *args, **kwargs):
offline_submission_site = None
if self.request.user.is_anonymous():
self.permission_denied(self.request)
fsxfid = kwargs.get('pk', None)
siteid = kwargs.get('site_id', None)
if siteid == '0':
siteid = None
elif Site.objects.filter(pk=siteid).exists() == False:
if len(siteid) > 12:
if FieldSightXF.objects.filter(pk=fsxfid).exists():
project_form = FieldSightXF.objects.get(pk=fsxfid)
project = project_form.project
if project:
site, created = Site.objects.get_or_create(
identifier="temporary_site",
is_active=True,
name="Temporary Site",
project_id=project.id,
is_survey=False,
)
siteid = site.id
offline_submission_site, created = SubmissionOfflineSite.objects.get_or_create(offline_site_id=kwargs.get('site_id', None), temporary_site=site, instance=None, fieldsight_form=project_form)
else:
return self.error_response("Invalid Project in Project Form id", False, request)
else:
return self.error_response("Invalid Form id", False, request)
else:
return self.error_response("siteid Invalid", False, request)
if fsxfid is None:
return self.error_response("Fieldsight Form ID Not Given", False, request)
try:
fs_proj_xf = get_object_or_404(FieldSightXF, pk=kwargs.get('pk'))
xform = fs_proj_xf.xf
proj_id = fs_proj_xf.project.id
if siteid:
site = Site.objects.get(pk=siteid)
except Exception as e:
return self.error_response("Site Id Or Project Form ID Not Vaild", False, request)
if request.method.upper() == 'HEAD':
return Response(status=status.HTTP_204_NO_CONTENT,
headers=self.get_openrosa_headers(request),
template_name=self.template_name)
error, instance = create_instance_from_xml(request, None, siteid, fs_proj_xf.id, proj_id, xform)
if error or not instance:
return self.error_response(error, False, request)
if fs_proj_xf.is_staged and siteid:
site.update_current_progress()
elif siteid:
site.update_status()
if offline_submission_site is not None:
try:
instance.fieldsight_instance.offline_submission
except Exception as e:
offline_submission_site.instance = instance.fieldsight_instance
offline_submission_site.save()
print("new submission")
if not FieldSightLog.objects.filter(object_id=instance.id, type=16).exists():
if fs_proj_xf.is_survey:
instance.fieldsight_instance.logs.create(source=self.request.user, type=16, title="new Project level Submission",
organization=fs_proj_xf.project.organization,
project=fs_proj_xf.project,
extra_object=fs_proj_xf.project,
extra_message="project",
content_object=instance.fieldsight_instance)
else:
site = Site.objects.get(pk=siteid)
instance.fieldsight_instance.logs.create(source=self.request.user, type=16, title="new Site level Submission",
organization=fs_proj_xf.project.organization,
project=fs_proj_xf.project, site=site,
extra_object=site,
content_object=instance.fieldsight_instance)
context = self.get_serializer_context()
serializer = FieldSightSubmissionSerializer(instance, context=context)
return Response(serializer.data,
headers=self.get_openrosa_headers(request),
status=status.HTTP_201_CREATED,
template_name=self.template_name)
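# Hedged sketch (not part of the original module): the site-id triage rule that
# both create() methods above implement inline. '0' means "no site", an existing
# Site pk is used directly, and an id longer than 12 characters is treated as an
# offline-generated id that gets a temporary site plus a SubmissionOfflineSite
# link. The function name and return labels are illustrative only.
def classify_site_id(site_id, site_exists):
    if site_id == '0':
        return 'no_site'
    if site_exists:
        return 'existing_site'
    if len(site_id) > 12:
        return 'offline_site'
    return 'invalid'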
|
|
#!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
        if differing:
            # Differing values are logged for diagnosis but do not fail the
            # equality check; the raw data and timestamps already matched above.
            log.error('Value mismatch between particle dictionaries: %r', differing)
        return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
        @param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
#if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
        @throws InstrumentDriverException if there is a problem with the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
        @param sorted Return a sorted JSON dict; useful for testing, but slow,
           so don't enable it unless it matters
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
        Build values of a parsed structure. Just the values are built so
        that a child class can override this method and still call it with
        super() to get the base structure before modification.
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time()+(86400*365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
        Check that a preferred timestamp is indicated in the particle;
        adjusting to the next-best timestamp when it is missing is left
        to downstream processing (see the commented-out check below).
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
#if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function):
"""
        Encode a value using the encoding function; if it fails, store the error in a queue
"""
encoded_val = None
try:
encoded_val = encoding_function(value)
except Exception as e:
log.error("Data particle error encoding. Name:%s Value:%s", name, value)
self._encoding_errors.append({name: value})
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
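# Hedged sketch of the subclassing pattern described in the docstrings above: a
# driver-specific particle overrides _build_parsed_values() and uses
# _encode_value() so that encoding failures are collected in
# get_encoding_errors() instead of raising. The stream and field names below
# are illustrative only.
class ExampleTemperatureParticle(DataParticle):
    _data_particle_type = "example_temperature"

    def _build_parsed_values(self):
        # raw_data is assumed to be a simple numeric string such as "23.5"
        return [self._encode_value("temperature", self.raw_data, float)]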
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
    This class is a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
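# Hedged usage sketch (not part of the original module): wrapping a port agent
# packet dictionary in a RawDataParticle and building its dictionary form. The
# packet contents and timestamp below are illustrative only.
def _example_raw_particle():
    packet = {"raw": b"\x01\x02", "length": 2, "type": 1, "checksum": 3}
    particle = RawDataParticle(packet, port_timestamp=3874000000.0)
    # generate_dict() returns the plain structure; generate() would JSON-encode it
    return particle.generate_dict()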
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class NetworkWatchersOperations(object):
"""NetworkWatchersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def create_or_update(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a network watcher in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the network watcher
resource.
:type parameters: :class:`NetworkWatcher
<azure.mgmt.network.v2016_12_01.models.NetworkWatcher>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkWatcher
<azure.mgmt.network.v2016_12_01.models.NetworkWatcher>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkWatcher')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkWatcher', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
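    # Hedged usage sketch (comments only; not generated code). Assuming the
    # operations group is exposed as `network_watchers` on the management
    # client, creation looks roughly like:
    #
    #     watcher = client.network_watchers.create_or_update(
    #         'my-rg', 'my-watcher',
    #         models.NetworkWatcher(location='westus'))
    #
    # The exact NetworkWatcher constructor arguments are an assumption here.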
def get(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified network watcher by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkWatcher
<azure.mgmt.network.v2016_12_01.models.NetworkWatcher>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified network watcher resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
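    # Hedged usage sketch (comments only; not generated code): delete() returns
    # an AzureOperationPoller unless raw=True, so callers typically block on it:
    #
    #     poller = client.network_watchers.delete('my-rg', 'my-watcher')
    #     poller.result()   # waits for the long-running operation to finish
    #
    # The `network_watchers` attribute name on the client is an assumption.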
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network watchers by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkWatcherPaged
<azure.mgmt.network.v2016_12_01.models.NetworkWatcherPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
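    # Hedged usage sketch (comments only; not generated code): the returned
    # NetworkWatcherPaged object is an msrest paged collection, so it can be
    # iterated directly and fetches further pages lazily:
    #
    #     for watcher in client.network_watchers.list('my-rg'):
    #         print(watcher.name)
    #
    # The `network_watchers` attribute name on the client is an assumption.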
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network watchers by subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkWatcherPaged
<azure.mgmt.network.v2016_12_01.models.NetworkWatcherPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_topology(
self, resource_group_name, network_watcher_name, target_resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets the current network topology by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param target_resource_group_name: The name of the target resource
group to perform topology on.
:type target_resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Topology
<azure.mgmt.network.v2016_12_01.models.Topology>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TopologyParameters(target_resource_group_name=target_resource_group_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TopologyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Topology', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
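    # Hedged usage sketch (comments only; not generated code): get_topology()
    # posts a TopologyParameters body built from the target resource group and
    # returns the deserialized Topology synchronously:
    #
    #     topology = client.network_watchers.get_topology(
    #         'watcher-rg', 'my-watcher', target_resource_group_name='target-rg')
    #
    # The `network_watchers` attribute name on the client is an assumption.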
def verify_ip_flow(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Verify IP flow from the specified VM to a location given the currently
configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters: :class:`VerificationIPFlowParameters
<azure.mgmt.network.v2016_12_01.models.VerificationIPFlowParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VerificationIPFlowResult
<azure.mgmt.network.v2016_12_01.models.VerificationIPFlowResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VerificationIPFlowResult', response)
if response.status_code == 202:
deserialized = self._deserialize('VerificationIPFlowResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_next_hop(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination
endpoint.
:type parameters: :class:`NextHopParameters
<azure.mgmt.network.v2016_12_01.models.NextHopParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`NextHopResult
<azure.mgmt.network.v2016_12_01.models.NextHopResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NextHopParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_vm_security_rules(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
"""Gets the configured and effective security group rules on the specified
VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param target_resource_id: ID of the target VM.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`SecurityGroupViewResult
<azure.mgmt.network.v2016_12_01.models.SecurityGroupViewResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.SecurityGroupViewParameters(target_resource_id=target_resource_id)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_troubleshooting(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to
troubleshoot.
:type parameters: :class:`TroubleshootingParameters
<azure.mgmt.network.v2016_12_01.models.TroubleshootingParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`TroubleshootingResult
<azure.mgmt.network.v2016_12_01.models.TroubleshootingResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_troubleshooting_result(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param target_resource_id: The target resource ID to query the
troubleshooting result.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`TroubleshootingResult
<azure.mgmt.network.v2016_12_01.models.TroubleshootingResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.QueryTroubleshootingParameters(target_resource_id=target_resource_id)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def set_flow_log_configuration(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Configures flow log on a specified resource.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow
log.
:type parameters: :class:`FlowLogInformation
<azure.mgmt.network.v2016_12_01.models.FlowLogInformation>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`FlowLogInformation
<azure.mgmt.network.v2016_12_01.models.FlowLogInformation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'FlowLogInformation')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_flow_log_status(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
"""Queries status of flow log on a specified resource.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param target_resource_id: The target resource for which to query the flow
log status.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`FlowLogInformation
<azure.mgmt.network.v2016_12_01.models.FlowLogInformation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.FlowLogStatusParameters(target_resource_id=target_resource_id)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
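# --- Hedged usage sketch (not part of the generated operations class) -------
# Each of the operations above returns an AzureOperationPoller unless
# raw=True is passed; a caller typically obtains this class from a
# NetworkManagementClient and blocks on .result(). The credential setup and
# resource names below are illustrative placeholders only.
#
#   from azure.common.credentials import ServicePrincipalCredentials
#   from azure.mgmt.network import NetworkManagementClient
#
#   credentials = ServicePrincipalCredentials(
#       client_id='<app-id>', secret='<secret>', tenant='<tenant-id>')
#   client = NetworkManagementClient(credentials, '<subscription-id>')
#
#   poller = client.network_watchers.get_flow_log_status(
#       'myResourceGroup', 'myNetworkWatcher',
#       target_resource_id='<network-security-group-resource-id>')
#   flow_log_info = poller.result()  # blocks until the operation completes
# -----------------------------------------------------------------------------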
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.eager import tape as tape_module
# Terminology:
#
# - op: a possibly composite operation, which has an entry in the tape
# - target: dy in dx/dy
# - source: dx in dx/dy
# - tensor: one of the many inputs or outputs of an operation
#
# Below here we do the gradient algorithm. It works as follows:
#
# First we filter the tape to just the subset of operations we want to
# differentiate. In the process of doing so we count how many times each Tensor
# is used as an input to an op (so we know when we're done computing gradients
# for that Tensor). We also count, for each tape entry, how many of its output
# Tensors need gradients to be computed (Tensors which are not used do not need
# any gradients to be computed).
#
# Finally, we start a backprop stack with a set of tape entries for which we
# have all gradients available. This set usually is a subset of the set of
# targets (not all since targets which have outputs in the tape will not have
# gradients available initially).
#
# Then we repeatedly pop an entry from the stack, run its backprop, and update
# the gradients of its inputs. Once we have computed all gradients for a single
# input we can mark this input as done, and this can trigger adding an entry to
# the stack if all outputs of that entry are now done.
#
# When the stack is empty we have gradients for all tensors we're interested in.
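# Hedged walk-through of the bookkeeping described above (illustration only,
# not part of the implementation). Consider a toy tape where s is a watched
# source and the target is t2:
#
#     op_A: inputs [s]  -> outputs [t1]
#     op_B: inputs [t1] -> outputs [t2]
#
# _prepare_backprop walks back from t2 and keeps both entries, recording
#     tensor_usage_counts = {t1: 1, s: 1}
#     op_missing_tensor   = {op_A: 1}   # t1's gradient is not yet available
# so _initialize_backprop_stack starts with ready_ops = [op_B] only. Popping
# op_B yields the gradient flowing into t1; t1's usage count drops to zero,
# which makes op_A ready, and popping op_A finally produces the gradient
# with respect to s.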
def _prepare_backprop(vspace, target, tensor_to_op, op_to_entry, id_sources):
"""Filters the tape to only include relevant entries and counts tensor usages.
Args:
vspace: information about the space we're differentiating in.
target: the target to optimize.
tensor_to_op: Map from tensor id to key in op_to_entry that produced it.
op_to_entry: Map from op id to a tape.TapeEntry object
id_sources: the ids of the sources wrt the gradient is being taken.
Returns:
tensor_usage_counts: map from tensor id to how many of the kept entries
use that tensor as an input.
op_to_entry_map: the filtered tape, containing only the relevant entries.
op_missing_tensor_counts: map from op id to how many of that op's output
tensors still need gradients before its backward function can run.
"""
tensor_stack = [vspace.tensor_id(x) for x in target]
tensor_usage_counts = {}
o_to_e = {} # Copy of just the bits we need from op_to_entry
while tensor_stack:
t = tensor_stack.pop()
op = tensor_to_op.get(t, None)
# op is None or -1 if the tensor is a source (i.e. was watched directly)
if op is None or op == -1 or op in o_to_e:
continue
op_trace = tape_module.TapeEntry(*op_to_entry[op])
o_to_e[op] = op_trace
for it in op_trace.input_ids:
if it in tensor_usage_counts:
tensor_usage_counts[it] += 1
else:
tensor_usage_counts[it] = 1
if it not in id_sources and it in tensor_to_op:
tensor_stack.append(it)
op_missing_tensor_counts = collections.defaultdict(int)
for t in tensor_usage_counts:
if t in tensor_to_op and tensor_to_op[t] is not None:
op_missing_tensor_counts[tensor_to_op[t]] += 1
return tensor_usage_counts, o_to_e, op_missing_tensor_counts
def _initialize_backprop_stack(op_to_entry, op_missing_tensor):
"""Returns the set of tape entries which are available for backprop."""
ready_ops = []
for op in op_to_entry:
if op not in op_missing_tensor:
ready_ops.append(op)
return ready_ops
def _initial_gradients(vspace, target, output_gradients, tensor_usage_counts):
"""Computes the initial gradients for each Tensor."""
# Initialize the backprop stack
gradients = collections.defaultdict(list)
for i, t in enumerate(target):
if vspace.tensor_id(t) in tensor_usage_counts:
# Can't provide a gradient of something we're trying to differentiate
assert output_gradients is None or output_gradients[i] is None
else:
if output_gradients is None or output_gradients[i] is None:
out_grad = vspace.ones_like(t)
else:
out_grad = output_gradients[i]
gradients[vspace.tensor_id(t)].append(out_grad)
return gradients
VSpace = collections.namedtuple(
"VSpace",
["aggregate_fn", "num_elements_fn", "tensor_id", "zeros", "ones_like"])
# If over MIN_AGGREGATE_COUNT gradients are accumulated and the total
# memory consumption is over MIN_AGGREGATE_BYTES, do an early aggregation
# so as to release the gradient tensor to save memory.
_MIN_AGGREGATE_COUNT = 4
_MIN_AGGREGATE_BYTES = 128 * 1024 * 1024
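# Hedged arithmetic example: the size check below hardcodes 4 bytes per
# element (float32). For a gradient tensor of 16M elements, each accumulated
# gradient is ~64MB; once the 4th gradient is appended the count threshold
# is met and len(t_grads) * size * 4 = 4 * 16M * 4 = 256MB, which exceeds
# _MIN_AGGREGATE_BYTES, so the list is collapsed into a single aggregated
# tensor to release memory early.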
def imperative_grad(
vspace,
tape,
target,
sources,
output_gradients=None):
"""Computes gradients from the imperatively defined tape on top of the stack.
Works by filtering the tape, computing how many downstream usages are of each
tensor and entry, and repeatedly applying backward functions until we have
gradients for all sources.
Args:
vspace: the vector space in which to differentiate.
tape: the gradient tape which stores the trace.
target: either a Tensor or list of Tensors to be differentiated.
sources: list of Tensors for which we want gradients
output_gradients: if not None, a list of gradients, one for each target; a
None entry means that target's gradient is taken from its downstream uses
(or seeded with ones if it has none).
Returns:
the gradient wrt each of the sources.
Raises:
RuntimeError: if something goes wrong.
ValueError: if there is no sequence of differentiable operations connecting
a source and any target Tensor. This can happen either if the target is
not computed based on the source, if the tracing was set up incorrectly,
or if only non-differentiable functions of the source were used in the
computation of target.
"""
tensor_to_op, op_to_entry = tape.export()
# This overwrites the op_to_entry variable, which will release all memory used
# to keep traces that are irrelevant to the gradient computation we're doing
# here.
id_sources = [vspace.tensor_id(t) for t in sources]
tensor_usage_counts, op_to_entry, op_missing_tensor = _prepare_backprop(
vspace, target, tensor_to_op, op_to_entry, id_sources)
ready_ops = _initialize_backprop_stack(op_to_entry, op_missing_tensor)
gradients = _initial_gradients(vspace, target, output_gradients,
tensor_usage_counts)
gradients_size = dict()
# Now exhaust the backprop stack
while ready_ops:
op = ready_ops.pop()
op_trace = op_to_entry.pop(op)
out_gradients = [gradients.pop(t, None) for t in op_trace.output_ids]
# Cache the last used zero tensor. We reuse it if the next one
# we need is of the same shape and dtype. This is very helpful in
# large splits and should have negligible overhead in other cases.
last_shape_and_dtype = None
last_zeros = None
for i in range(len(out_gradients)):
if out_gradients[i] is None:
# TODO(apassos) this should be in the right device
none_indices = _grad_fn_accepts_none_for_indices.get(
op_trace.op_type, None)
if none_indices is None or i not in none_indices:
shape_and_dtype = op_trace.output_shape_and_dtype[i]
if shape_and_dtype != last_shape_and_dtype:
last_shape_and_dtype = shape_and_dtype
last_zeros = vspace.zeros(*shape_and_dtype)
out_gradients[i] = last_zeros
else:
out_gradients[i] = vspace.aggregate_fn(out_gradients[i])
in_gradients = op_trace.backward_function(*(out_gradients))
for i, t in enumerate(op_trace.input_ids):
if in_gradients[i] is not None:
t_grads = gradients.setdefault(t, [])
t_grads.append(in_gradients[i])
if len(t_grads) >= _MIN_AGGREGATE_COUNT:
if t not in gradients_size:
gradients_size[t] = vspace.num_elements_fn(t_grads[-1])
size = gradients_size[t]
if len(t_grads) * size * 4 > _MIN_AGGREGATE_BYTES:
t_grads[:] = [vspace.aggregate_fn(t_grads)]
if tensor_usage_counts.get(t, 0) > 0:
tensor_usage_counts[t] -= 1
if (t in tensor_to_op
and tensor_usage_counts[t] == 0
and t not in id_sources):
in_op = tensor_to_op[t]
if in_op is None or in_op == -1:
continue
if op_missing_tensor.get(in_op, 0) > 0:
op_missing_tensor[in_op] -= 1
if op_missing_tensor.get(in_op, 0) == 0:
ready_ops.append(in_op)
result = []
for i, s in enumerate(sources):
g = gradients.get(vspace.tensor_id(s), None)
if g is None:
result.append(None)
else:
result.append(vspace.aggregate_fn(g))
return result
# TODO(agarwal): use an automatic mechanism for handling None arguments to
# gradient functions.
# Some gradient functions can accept None arguments for gradients. The following
# maps the operation name to the indices at which the corresponding gradient
# function can accept None values.
# e.g. FusedBatchNorm outputs 5 values and hence receives 5 gradient values
# during backprop. However the gradient function uses only the first of those
# values and ignores the rest. The entry, "FusedBatchNorm": [1, 2, 3, 4],
# indicates that only the gradient corresponding to index 0 is used, and the
# gradient values at indices 1-4 are ignored (and hence can be None). The
# backprop algorithm can then leverage this by not constructing zeros to
# pass for those indices.
_grad_fn_accepts_none_for_indices = {
"SoftmaxCrossEntropyWithLogits": [1],
"FusedBatchNorm": [1, 2, 3, 4]
}
|
|
from __future__ import division
from django.shortcuts import *
from django.conf import settings
from django.db import transaction
from django.db.models.expressions import RawSQL
from django.contrib import messages
from django.http import *
from django.core.exceptions import *
from django.views.generic import *
from django.views.generic.edit import BaseFormView
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import *
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_exempt
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from uchicagohvz.game.models import *
from uchicagohvz.game.forms import *
from uchicagohvz.game.data_apis import *
from uchicagohvz.game.tasks import *
from uchicagohvz.users.models import *
import re
# Create your views here.
class ListGames(ListView):
model = Game
template_name = 'game/list.html'
def get_queryset(self):
qs = super(ListGames, self).get_queryset()
if self.request.user.is_authenticated():
current_game = Game.objects.all()[0]
past_players = Player.objects.filter(user=self.request.user).exclude(game=current_game)
try:
player = Player.objects.get(game=current_game, user=self.request.user)
if past_players and (not past_players[0].gun_returned) and past_players[0].renting_gun:
player.delinquent_gun = True
player.save()
except Player.DoesNotExist:
# the user has no Player registered in the current game
pass
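# Hedged note: the RawSQL annotation below attaches a boolean "is_player"
# to each Game row, true when the logged-in user has an active Player in
# that game.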
qs = qs.annotate(is_player=RawSQL("SELECT EXISTS(SELECT 1 FROM game_player WHERE \
game_player.game_id = game_game.id AND game_player.user_id = %s AND \
game_player.active = true)", (self.request.user.id,)))
return qs
class ShowGame(DetailView):
model = Game
template_name = 'game/show.html'
def get_context_data(self, **kwargs):
for squad in New_Squad.objects.all():
if squad.players.count() == 0:
squad.delete()
context = super(ShowGame, self).get_context_data(**kwargs)
if self.object.status in ('in_progress', 'finished'):
if self.object.get_active_players().count() > 0:
context['humans_percent'] = int(round(100 * self.object.get_humans().count() / self.object.get_active_players().count(), 0))
context['zombies_percent'] = int(round(100 * self.object.get_zombies().count() / self.object.get_active_players().count(), 0))
if self.object.status == "in_progress":
context['sms_code_number'] = settings.NEXMO_NUMBER
context['kills_per_hour'] = kills_per_hour(self.object)
context['kills_in_last_hour'] = kills_in_last_hour(self.object)
context['survival_by_dorm'] = survival_by_dorm(self.object)
context['most_courageous_dorms'] = most_courageous_dorms(self.object)
context['most_infectious_dorms'] = most_infectious_dorms(self.object)
context['top_humans'] = top_humans(self.object)[:10]
context['top_zombies'] = top_zombies(self.object)[:10]
context['squad_count'] = self.object.squads.count()
context['missions'] = self.object.missions.all()
if self.object.squads.count():
context['top_human_squads'] = top_human_squads(self.object)
context['top_zombie_squads'] = top_zombie_squads(self.object)
if self.request.user.is_authenticated():
in_game = Player.objects.filter(game=self.object, user=self.request.user).exists()
if in_game:
player = Player.objects.get(game=self.object, user=self.request.user)
context['player'] = player
past_players = Player.objects.filter(user=self.request.user).exclude(game=self.object)
if past_players and (not past_players[0].gun_returned) and past_players[0].renting_gun:
player.delinquent_gun = True
player.save()
if self.object.status in ('in_progress', 'finished') and player.active:
if player.human:
context['player_rank'] = player.human_rank
else:
context['player_rank'] = player.zombie_rank
return context
class RegisterForGame(FormView):
form_class = GameRegistrationForm
template_name = "game/register.html"
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
if self.game.status != 'registration' or Player.objects.filter(game=self.game, user=request.user).exists():
return HttpResponseRedirect(self.game.get_absolute_url())
return super(RegisterForGame, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
player = form.save(commit=False)
player.user = self.request.user
player.game = self.game
player.save()
messages.success(self.request, "You are now registered for %s!" % (self.game.name))
return HttpResponseRedirect(self.game.get_absolute_url())
def get_context_data(self, **kwargs):
context = super(RegisterForGame, self).get_context_data(**kwargs)
context['game'] = self.game
return context
def get_form_kwargs(self):
kwargs = super(RegisterForGame, self).get_form_kwargs()
kwargs['game'] = self.game
return kwargs
class ChooseSquad(FormView):
form_class = SquadForm
template_name = 'game/choose_squad.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
self.player = get_object_or_404(Player, game=self.game, user=self.request.user)
return super(ChooseSquad, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
if form.cleaned_data['create_squad']:
self.player.new_squad = New_Squad.objects.create(
game=self.game, name=form.cleaned_data['create_squad'])
elif form.cleaned_data['choose_squad']:
self.player.new_squad = form.cleaned_data['choose_squad']
self.player.save(update_fields=['new_squad'])
return HttpResponseRedirect(self.game.get_absolute_url())
def get_form_kwargs(self):
kwargs = super(ChooseSquad, self).get_form_kwargs()
kwargs['game'] = self.game
return kwargs
class EnterBiteCode(FormView):
form_class = BiteCodeForm
template_name = 'game/enter-bite-code.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(EnterBiteCode, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
victim = form.victim
kill = victim.kill_me(self.killer)
if kill:
send_death_notification.delay(kill)
kill.lat = form.cleaned_data.get('lat')
kill.lng = form.cleaned_data.get('lng')
kill.notes = form.cleaned_data.get('notes')
kill.save()
victim_profile = Profile.objects.get(user=victim.user)
messages.success(self.request, mark_safe(
u"Kill logged successfully! <b>%s</b> has joined the ranks of the undead." % (victim.user.get_full_name())))
if victim_profile.last_words:
victim.last_words = victim_profile.last_words
victim.save()
messages.error(self.request, mark_safe(
u"{0}'s last words: {1}".format(victim.user.get_full_name(), victim.last_words)))
return HttpResponseRedirect(self.game.get_absolute_url())
def get_form_kwargs(self):
kwargs = super(EnterBiteCode, self).get_form_kwargs()
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
if self.game.status == 'in_progress':
self.killer = get_object_or_404(Player, game=self.game, active=True, human=False, user=self.request.user)
kwargs['killer'] = self.killer
kwargs['require_location'] = True
return kwargs
else:
raise PermissionDenied
def get_context_data(self, **kwargs):
context = super(EnterBiteCode, self).get_context_data(**kwargs)
return context
class AnnotateKill(UpdateView):
form_class = AnnotateKillForm
model = Kill
template_name = 'game/annotate-kill.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AnnotateKill, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
kill = super(AnnotateKill, self).get_object()
if kill.killer.user == self.request.user:
return kill
raise PermissionDenied
def form_valid(self, form):
kill = form.save()
messages.success(self.request, 'Kill annotated successfully.')
return HttpResponseRedirect(kill.killer.game.get_absolute_url())
class SubmitCodeSMS(APIView):
@method_decorator(csrf_exempt)
def post(self, request, *args, **kwargs):
if all([f in request.data for f in ('msisdn', 'text')]):
process_sms_code.delay(request.data['msisdn'], request.data['text'])
return Response()
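# Hedged example of the inbound webhook payload this endpoint expects (field
# names follow the Nexmo inbound-SMS format used elsewhere in this app; the
# values are placeholders):
#
#   {"msisdn": "15551234567", "text": "<bite code text>"}
#
# Only 'msisdn' and 'text' are read; processing happens asynchronously in
# the process_sms_code task.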
class SubmitAwardCode(BaseFormView):
form_class = AwardCodeForm
http_method_names = ['post']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SubmitAwardCode, self).dispatch(request, *args, **kwargs)
@transaction.atomic
def form_valid(self, form):
award = form.award
award.players.add(self.player)
award.save()
messages.success(self.request, mark_safe("Code entry for <b>%s</b> accepted!" % (award.name)))
return HttpResponseRedirect(self.game.get_absolute_url())
def form_invalid(self, form):
for e in form.non_field_errors():
messages.error(self.request, e)
return HttpResponseRedirect(self.game.get_absolute_url())
def get_form_kwargs(self):
kwargs = super(SubmitAwardCode, self).get_form_kwargs()
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
if self.game.status == 'in_progress':
self.player = get_object_or_404(Player, game=self.game, active=True, user=self.request.user)
kwargs['player'] = self.player
return kwargs
else:
raise PermissionDenied
class SubmitDiscordTag(BaseFormView):
form_class = DiscordTagForm
http_method_names = ['post']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SubmitDiscordTag, self).dispatch(request, *args, **kwargs)
@transaction.atomic
def form_valid(self, form):
tag = form.tag
messages.success(self.request, mark_safe("Registered Discord Tag!"))
return HttpResponseRedirect(self.game.get_absolute_url())
def form_invalid(self, form):
for e in form.non_field_errors():
messages.error(self.request, e)
return HttpResponseRedirect(self.game.get_absolute_url())
def get_form_kwargs(self):
kwargs = super(SubmitDiscordTag, self).get_form_kwargs()
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
self.player = get_object_or_404(Player, game=self.game, user=self.request.user)
kwargs['user'] = self.request.user
kwargs['player'] = self.player
return kwargs
class SubmitMinecraftUsername(BaseFormView):
form_class = MinecraftUsernameForm
http_method_names = ['post']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SubmitMinecraftUsername, self).dispatch(request, *args, **kwargs)
@transaction.atomic
def form_valid(self, form):
mc_username = form.mc_username
messages.success(self.request, mark_safe("Registered minecraft username!"))
return HttpResponseRedirect(self.game.get_absolute_url())
def form_invalid(self, form):
for e in form.non_field_errors():
messages.error(self.request, e)
return HttpResponseRedirect(self.game.get_absolute_url())
def get_form_kwargs(self):
kwargs = super(SubmitMinecraftUsername, self).get_form_kwargs()
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
self.player = get_object_or_404(Player, game=self.game, user=self.request.user)
kwargs['user'] = self.request.user
kwargs['player'] = self.player
return kwargs
class ShowPlayer(DetailView):
model = Player
template_name = 'game/show_player.html'
def get_object(self, queryset=None):
return get_object_or_404(Player, id=self.kwargs['pk'], active=True)
def get_context_data(self, **kwargs):
context = super(ShowPlayer, self).get_context_data(**kwargs)
player = self.object
if (not player.human) and (player.user == self.request.user or player.game.status == 'finished'):
try:
my_kill = Kill.objects.filter(victim=player)[0]
context['kill_tree'] = my_kill.get_descendants()
except IndexError:
# the player was never killed, so there is no kill tree to display
pass
return context
class Leaderboard(DetailView):
model = Game
template_name = 'game/leaderboard.html'
def get_context_data(self, **kwargs):
context = super(Leaderboard, self).get_context_data(**kwargs)
game = context['game']
if game.status in ('in_progress', 'finished'):
context['top_humans'] = top_humans(game)
context['top_zombies'] = top_zombies(game)
return context
else:
raise Http404
class ShowSquad(DetailView):
model = Squad
template_name = 'game/show_squad.html'
class ShowNewSquad(DetailView):
model = New_Squad
template_name = 'game/show_new_squad.html'
class ShowKill(DetailView):
model = Kill
template_name = 'game/show_kill.html'
class ShowMission(DetailView):
model = Mission
template_name = 'game/show_mission.html'
class UploadMissionPicture(FormView):
form_class = UploadMissionPictureForm
model = MissionPicture
template_name = 'game/upload-mission-picture.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(UploadMissionPicture, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
mission_picture = super(UploadMissionPicture, self).get_object()
if mission_picture.player.user == self.request.user:
return mission_picture
raise PermissionDenied
def form_valid(self, form):
mission_picture = form.save(commit=False)
mission_picture.game = get_object_or_404(Game, id=self.kwargs['pk'])
mission_picture.save()
messages.success(self.request, 'Picture successfully uploaded.')
return HttpResponseRedirect(mission_picture.game.get_absolute_url())
def get_form_kwargs(self):
kwargs = super(UploadMissionPicture, self).get_form_kwargs()
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
kwargs['game'] = self.game
return kwargs
class SendZombieText(BaseFormView):
form_class = ZombieTextForm
http_method_names = ['post']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.game = get_object_or_404(Game, id=self.kwargs['pk'])
return super(SendZombieText, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if self.game.status == 'in_progress':
self.player = get_object_or_404(Player, game=self.game, active=True, user=request.user)
if not self.player.lead_zombie:
raise PermissionDenied
return super(SendZombieText, self).post(request, *args, **kwargs)
else:
raise PermissionDenied
@transaction.atomic
def form_valid(self, form):
message = form.message
send_zombie_text(message)
messages.success(self.request, mark_safe("Message sent to subscribing zombies!"))
return HttpResponseRedirect(self.game.get_absolute_url())
def form_invalid(self, form):
for e in form.non_field_errors():
messages.error(self.request, e)
return HttpResponseRedirect(self.game.get_absolute_url())
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import test
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.tempest import config
CONF = config.CONF
class FloatingIPTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Quantum API using the REST client for
Neutron:
Create a Floating IP
Update a Floating IP
Delete a Floating IP
List all Floating IPs
Show Floating IP details
Associate a Floating IP with a port and then delete that port
Associate a Floating IP with a port and then with a port on another
router
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
public_network_id which is the id for the external network present
"""
@classmethod
def resource_setup(cls):
super(FloatingIPTestJSON, cls).resource_setup()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
cls.ext_net_id = CONF.network.public_network_id
# Create network, subnet, router and add interface
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(data_utils.rand_name('router-'),
external_network_id=cls.ext_net_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.port = list()
# Create two ports one each for Creation and Updating of floatingIP
for i in range(2):
cls.create_port(cls.network)
@test.attr(type='smoke')
@test.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e8718')
def test_create_list_show_update_delete_floating_ip(self):
# Creates a floating IP
body = self.client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[0]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
self.assertEqual(created_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertIn(created_floating_ip['fixed_ip_address'],
[ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
# Verifies the details of a floating_ip
floating_ip = self.client.show_floatingip(created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertEqual(shown_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertEqual(shown_floating_ip['tenant_id'],
created_floating_ip['tenant_id'])
self.assertEqual(shown_floating_ip['floating_ip_address'],
created_floating_ip['floating_ip_address'])
self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])
# Verify the floating ip exists in the list of all floating_ips
floating_ips = self.client.list_floatingips()
floatingip_id_list = list()
for f in floating_ips['floatingips']:
floatingip_id_list.append(f['id'])
self.assertIn(created_floating_ip['id'], floatingip_id_list)
# Associate floating IP to the other port
floating_ip = self.client.update_floatingip(
created_floating_ip['id'],
port_id=self.ports[1]['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['port_id'], self.ports[1]['id'])
self.assertEqual(updated_floating_ip['fixed_ip_address'],
self.ports[1]['fixed_ips'][0]['ip_address'])
self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
# Disassociate floating IP from the port
floating_ip = self.client.update_floatingip(
created_floating_ip['id'],
port_id=None)
updated_floating_ip = floating_ip['floatingip']
self.assertIsNone(updated_floating_ip['port_id'])
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
self.assertIsNone(updated_floating_ip['router_id'])
@test.attr(type='smoke')
@test.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
def test_floating_ip_delete_port(self):
# Create a floating IP
body = self.client.create_floatingip(
floating_network_id=self.ext_net_id)
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
# Create a port
port = self.client.create_port(network_id=self.network['id'])
created_port = port['port']
floating_ip = self.client.update_floatingip(
created_floating_ip['id'],
port_id=created_port['id'])
# Delete port
self.client.delete_port(created_port['id'])
# Verifies the details of the floating_ip
floating_ip = self.client.show_floatingip(created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
# Confirm the fields are back to None
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertIsNone(shown_floating_ip['port_id'])
self.assertIsNone(shown_floating_ip['fixed_ip_address'])
self.assertIsNone(shown_floating_ip['router_id'])
@test.attr(type='smoke')
@test.idempotent_id('1bb2f731-fe5a-4b8c-8409-799ade1bed4d')
def test_floating_ip_update_different_router(self):
# Associate a floating IP to a port on a router
body = self.client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[1]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
router2 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router2['id'], subnet2['id'])
port_other_router = self.create_port(network2)
# Associate floating IP to the other port on another router
floating_ip = self.client.update_floatingip(
created_floating_ip['id'],
port_id=port_other_router['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['router_id'], router2['id'])
self.assertEqual(updated_floating_ip['port_id'],
port_other_router['id'])
self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])
@test.attr(type='smoke')
@test.idempotent_id('36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5')
def test_create_floating_ip_specifying_a_fixed_ip_address(self):
body = self.client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[1]['id'],
fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertEqual(created_floating_ip['fixed_ip_address'],
self.ports[1]['fixed_ips'][0]['ip_address'])
floating_ip = self.client.update_floatingip(
created_floating_ip['id'],
port_id=None)
self.assertIsNone(floating_ip['floatingip']['port_id'])
@test.attr(type='smoke')
@test.idempotent_id('45c4c683-ea97-41ef-9c51-5e9802f2f3d7')
def test_create_update_floatingip_with_port_multiple_ip_address(self):
# Find out ips that can be used for tests
ips = list(netaddr.IPNetwork(self.subnet['cidr']))
list_ips = [str(ip) for ip in ips[-3:-1]]
fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
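# Hedged worked example: for a /28 subnet such as 10.100.0.0/28,
# list(netaddr.IPNetwork(...)) enumerates 10.100.0.0 .. 10.100.0.15, so
# ips[-3:-1] picks the last two host addresses before the broadcast
# address: ['10.100.0.13', '10.100.0.14'].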
# Create port
body = self.client.create_port(network_id=self.network['id'],
fixed_ips=fixed_ips)
port = body['port']
self.addCleanup(self.client.delete_port, port['id'])
# Create floating ip
body = self.client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port['id'],
fixed_ip_address=list_ips[0])
floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
self.assertIsNotNone(floating_ip['id'])
self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
# Update floating ip
body = self.client.update_floatingip(floating_ip['id'],
port_id=port['id'],
fixed_ip_address=list_ips[1])
update_floating_ip = body['floatingip']
self.assertEqual(update_floating_ip['fixed_ip_address'],
list_ips[1])
|
|
"""
<Program>
seattleclearinghouse_xmlrpc.py
<Started>
6/28/2009
<Author>
Jason Chen
Justin Samuel
<Purpose>
A client library for communicating with the SeattleClearinghouse XMLRPC Server.
Your Python scripts can import this library, create an instance of the
SeattleClearinghouseClient class, then call methods on the object to perform XMLRPC
calls through the SeattleClearinghouse XMLRPC API.
For full tutorials on using this library, see:
https://seattle.cs.washington.edu/wiki/SeattleGeniClientLib
In order to perform secure SSL communication with SeattleClearinghouse:
* You must have M2Crypto installed.
* You must set the value of CA_CERTIFICATES_FILE to the location of a PEM
file containing CA certificates that you trust. If you don't know where
this is on your own system, you can download this file from a site you
trust. One such place to download this file from is:
http://curl.haxx.se/ca/cacert.pem
If you can't fulfill the above requirements, you can still use this client with
XMLRPC servers that use https but you will be vulnerable to a man-in-the-middle
attack. To enable this insecure mode, include the argument:
allow_ssl_insecure=True
when creating a SeattleClearinghouseClient instance.
<Notes>
All methods of the client class may raise the following errors in addition to
any others described in the method's docstring:
CommunicationError
AuthenticationError
InvalidRequestError
InternalError
The safest way to be certain to catch any of these errors is to catch
their base class:
SeattleClearinghouseError
"""
import os
import socket
import xmlrpclib
# If a user does not provide us with an API key, we'll need to load
# their private key instead.
from repyportability import add_dy_support
add_dy_support(locals())
# Location of a file containing one or more PEM-encoded CA certificates
# concatenated together. This is required if using allow_ssl_insecure=False.
# By default it looks for a cacert.pem file in the same directory as this
# python module is in.
DEFAULT_CA_CERTIFICATES_FILE = os.path.join(os.path.dirname(__file__), "cacert.pem")
# The location of the SeattleClearinghouse XMLRPC server to use.
DEFAULT_XMLRPC_URL = "https://seattleclearinghouse.poly.edu/xmlrpc/"
# SeattleClearinghouse XMLRPC Fault Code Constants
FAULTCODE_INTERNALERROR = 100
FAULTCODE_AUTHERROR = 101
FAULTCODE_INVALIDREQUEST = 102
FAULTCODE_NOTENOUGHCREDITS = 103
FAULTCODE_UNABLETOACQUIRE = 105
class SeattleClearinghouseClient(object):
"""
Implementation of an XMLRPC client for communicating with a SeattleClearinghouse
server. This uses the public API described at:
https://seattle.cs.washington.edu/wiki/SeattleGeniApi
"""
def __init__(self, username, api_key=None, private_key_string=None,
xmlrpc_url=None,
allow_ssl_insecure=None,
ca_certs_file=None):
if xmlrpc_url is None:
xmlrpc_url = DEFAULT_XMLRPC_URL
if allow_ssl_insecure is None:
allow_ssl_insecure = False
if ca_certs_file is None:
ca_certs_file = DEFAULT_CA_CERTIFICATES_FILE
if not isinstance(username, basestring):
raise TypeError("username must be a string")
if api_key is not None:
if not isinstance(api_key, basestring):
raise TypeError("api_key must be a string")
else:
if not private_key_string:
raise TypeError("private_key_string must be provided if api_key is not")
if not isinstance(private_key_string, basestring):
raise TypeError("private_key_string must be a string")
if not isinstance(xmlrpc_url, basestring):
raise TypeError("xmlrpc_url must be a string")
if not isinstance(allow_ssl_insecure, bool):
raise TypeError("allow_ssl_insecure must be True or False")
if not isinstance(ca_certs_file, basestring):
raise TypeError("ca_certs_file must be a string")
if allow_ssl_insecure:
self.proxy = xmlrpclib.Server(xmlrpc_url)
else:
ssl_transport = _get_ssl_transport(ca_certs_file)
self.proxy = xmlrpclib.Server(xmlrpc_url, transport=ssl_transport)
if not api_key:
api_key = self._get_api_key(username, private_key_string)
self.auth = {'username':username, 'api_key':api_key}
def _get_api_key(self, username, private_key_string):
# Normally we try not to import modules anywhere but globally, but we want
# this xmlrpc client to remain usable without the repy files when the user
# provides their API key directly, in which case the key never needs to be
# retrieved and decrypted here.
try:
dy_import_module_symbols("rsa.r2py")
except ImportError, e:
raise SeattleClearinghouseError("Unable to get API key from SeattleClearinghouse " +
"because a required python or repy module " +
"cannot be found:" + str(e))
# This will raise a ValueError if the private key is not valid.
private_key_dict = rsa_string_to_privatekey(private_key_string)
encrypted_data = self.proxy.get_encrypted_api_key(username)
decrypted_data = rsa_decrypt(encrypted_data, private_key_dict)
split_data = decrypted_data.split("!")
# The encrypted data has 20 bytes of random data followed by a "!" which
# is then followed by the actual API key. If the private key was the wrong
# key, we will end up with garbage data (if it was an invalid key, it
# might be empty, though). So, we're going to make the fairly safe
# assumption that the odds of a random decryption with the wrong key
# resulting in data that starts with 20 bytes which aren't exclamation
# marks followed by a single exclamation mark and no others is pretty low.
if len(split_data) != 2 or len(split_data[0]) != 20:
raise AuthenticationError("The provided private key does not appear " +
"to correspond to this account's public key: " +
"encrypted API key could not be decrypted.")
api_key = split_data[1]
return api_key
def _do_call(self, function, *args):
try:
return function(self.auth, *args)
except socket.error, err:
raise CommunicationError("XMLRPC failed: " + str(err))
except xmlrpclib.Fault, fault:
if fault.faultCode == FAULTCODE_AUTHERROR:
raise AuthenticationError
elif fault.faultCode == FAULTCODE_INVALIDREQUEST:
raise InvalidRequestError(fault.faultString)
elif fault.faultCode == FAULTCODE_NOTENOUGHCREDITS:
raise NotEnoughCreditsError(fault.faultString)
elif fault.faultCode == FAULTCODE_UNABLETOACQUIRE:
raise UnableToAcquireResourcesError(fault.faultString)
else:
raise InternalError(fault.faultString)
def _do_pwauth_call(self, function, password, *args):
"""For use by calls that require a password rather than an api key."""
pwauth = {'username':self.auth['username'], 'password':password}
try:
return function(pwauth, *args)
except socket.error, err:
raise CommunicationError("XMLRPC failed: " + str(err))
except xmlrpclib.Fault, fault:
if fault.faultCode == FAULTCODE_AUTHERROR:
raise AuthenticationError
elif fault.faultCode == FAULTCODE_INVALIDREQUEST:
raise InvalidRequestError(fault.faultString)
elif fault.faultCode == FAULTCODE_NOTENOUGHCREDITS:
raise NotEnoughCreditsError(fault.faultString)
elif fault.faultCode == FAULTCODE_UNABLETOACQUIRE:
raise UnableToAcquireResourcesError(fault.faultString)
else:
raise InternalError(fault.faultString)
def acquire_lan_resources(self, count):
"""
<Purpose>
Acquire LAN vessels.
<Arguments>
count
The number of vessels to acquire.
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account does not have enough available vessel credits to fulfill
the request.
<Side Effects>
If successful, 'count' LAN vessels have been acquired for the account.
<Returns>
A list of vessel handles of the acquired vessels.
"""
return self.acquire_resources('lan', count)
def acquire_wan_resources(self, count):
"""
<Purpose>
Acquire WAN vessels.
<Arguments>
count
The number of vessels to acquire.
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account does not have enough available vessel credits to fulfill
the request.
<Side Effects>
If successful, 'count' WAN vessels have been acquired for the account.
<Returns>
A list of vessel handles of the acquired vessels.
"""
return self.acquire_resources('wan', count)
def acquire_nat_resources(self, count):
"""
<Purpose>
Acquire NAT vessels.
<Arguments>
count
The number of vessels to acquire.
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account does not have enough available vessel credits to fulfill
the request.
<Side Effects>
If successful, 'count' NAT vessels have been acquired for the account.
<Returns>
A list of vessel handles of the acquired vessels.
"""
return self.acquire_resources('nat', count)
def acquire_random_resources(self, count):
"""
<Purpose>
Acquire vessels (they can be LAN, WAN, NAT, or any combination of these).
<Arguments>
count
The number of vessels to acquire.
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account does not have enough available vessel credits to fulfill
the request.
<Side Effects>
If successful, 'count' vessels have been acquired for the account.
<Returns>
A list of vessel handles of the acquired vessels.
"""
return self.acquire_resources('random', count)
def acquire_resources(self, res_type, count):
"""
<Purpose>
Acquire vessels.
<Arguments>
res_type
A string describing the type of vessels to acquire.
count
The number of vessels to acquire.
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account does not have enough available vessel credits to fulfill
the request.
<Side Effects>
If successful, 'count' vessels have been acquired for the account.
<Returns>
A list of vessel handles of the acquired vessels.
"""
if not isinstance(res_type, basestring):
raise TypeError("res_type must be a string")
if type(count) not in [int, long]:
raise TypeError("count must be an integer")
rspec = {'rspec_type':res_type, 'number_of_nodes':count}
return self._do_call(self.proxy.acquire_resources, rspec)
def acquire_specific_vessels(self, handlelist):
"""
<Purpose>
Attempt to acquire specific vessels.
<Arguments>
handlelist
A list of vessel handles.
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account does not have enough available vessel credits to fulfill
the request.
<Side Effects>
If successful, zero or more vessels from handlelist have been acquired.
<Returns>
A list of vessel handles of the acquired vessels.
"""
_validate_handle_list(handlelist)
return self._do_call(self.proxy.acquire_specific_vessels, handlelist)
def release_resources(self, handlelist):
"""
<Purpose>
Release vessels.
<Arguments>
handlelist
A list of handles as returned by acquire_vessels() or found in the
'handle' key of the dictionaries returned by get_resource_info().
<Exceptions>
The common exceptions described in the module comments.
<Side Effects>
If successful, the vessels in handlelist have been released. If not
successful, it is possible that a partial set of the vessels was
released.
<Returns>
None
"""
_validate_handle_list(handlelist)
return self._do_call(self.proxy.release_resources, handlelist)
def renew_resources(self, handlelist):
"""
<Purpose>
Renew vessels.
<Arguments>
handlelist
A list of handles as returned by acquire_vessels() or found in the
'handle' key of the dictionaries returned by get_resource_info().
<Exceptions>
The common exceptions described in the module comments, as well as:
SeattleClearinghouseNotEnoughCredits
If the account is currently over its vessel credit limit, then vessels
cannot be renewed until the account is no longer over its credit limit.
<Side Effects>
If successful, the vessels in handlelist have been renewed. If not
successful, it is possible that a partial set of the vessels was
renewed.
<Returns>
None
"""
_validate_handle_list(handlelist)
return self._do_call(self.proxy.renew_resources, handlelist)
def get_resource_info(self):
"""
<Purpose>
Obtain information about acquired vessels.
<Arguments>
None
<Exceptions>
The common exceptions described in the module comments.
<Side Effects>
None
<Returns>
A list of dictionaries, where each dictionary describes a vessel that
is currently acquired by the account.
"""
return self._do_call(self.proxy.get_resource_info)
def get_account_info(self):
"""
<Purpose>
Obtain information about the account.
<Arguments>
None
<Exceptions>
The common exceptions described in the module comments.
<Side Effects>
None
<Returns>
A dictionary with information about the account.
"""
return self._do_call(self.proxy.get_account_info)
def get_public_key(self):
"""
<Purpose>
Obtain the public key of the account.
<Arguments>
None
<Exceptions>
The common exceptions described in the module comments, as well as:
None
<Side Effects>
None
<Returns>
A string containing the public key of the account.
"""
return self._do_call(self.proxy.get_public_key)
def set_public_key(self, password, pubkeystring):
"""
<Purpose>
Set the public key of the account.
<Arguments>
password
The account password. This is required because changing the public
key of the account cannot be done with just the api key.
pubkeystring
A string representing the new public key to be set for the account.
<Exceptions>
The common exceptions described in the module comments, as well as:
InvalidRequestError
If the pubkey is invalid.
<Side Effects>
The public key of the account is changed and will be updated on all
vessels the account has acquired.
<Returns>
None
"""
self._do_pwauth_call(self.proxy.set_public_key, password, pubkeystring)
def regenerate_api_key(self, password):
"""
<Purpose>
Generate a new API key for the account.
<Arguments>
password
The account password. This is required because changing the api
key of the account cannot be done with just the current api key.
<Exceptions>
The common exceptions described in the module comments, as well as:
None
<Side Effects>
The account's api key has been changed.
<Returns>
The new api key for the account.
"""
api_key = self._do_pwauth_call(self.proxy.regenerate_api_key, password)
self.auth['api_key'] = api_key
return api_key
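# Illustrative usage sketch (not part of the original module). The constructor
# arguments below are assumptions; see the class definition above for the exact
# signature:
#
#   client = SeattleClearinghouseClient(username='alice', api_key='0123456789abcdef')
#   handles = client.acquire_lan_resources(2)
#   try:
#       print client.get_account_info()
#   finally:
#       client.release_resources(handles)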
def _validate_handle_list(handlelist):
"""
Raise a TypeError or ValueError if handlelist is not a non-empty list of
strings.
"""
if not isinstance(handlelist, list):
raise TypeError("Invalid data type for handle list: " +
str(type(handlelist)))
for handle in handlelist:
if not isinstance(handle, basestring):
raise TypeError("Invalid data type for a handle in the handle list: " +
str(type(handle)))
if not handlelist:
raise ValueError("Given handlelist is empty.")
def _get_ssl_transport(ca_certs_file):
"""
Returns an object usable as the transport for an xmlrpclib proxy. This will
be an M2Crypto.m2xmlrpclib.SSL_Transport that has been configured with a
context that has the ca_certs_file loaded, will not allow SSLv2, and will
reject certificate names that don't match the hostname.
"""
try:
import M2Crypto
except ImportError, err:
raise ImportError("In order to use the SeattleClearinghouse XMLRPC client with " +
"allow_ssl_insecure=False, you need M2Crypto " +
"installed. " + str(err))
# We don't define this class until here because otherwise M2Crypto may not
# be available.
class M2CryptoSSLTransport(M2Crypto.m2xmlrpclib.SSL_Transport):
def request(self, host, handler, request_body, verbose=0):
if host.find(":") == -1:
host = host + ":443"
return M2Crypto.m2xmlrpclib.SSL_Transport.request(self, host, handler,
request_body, verbose)
ctx = M2Crypto.SSL.Context("sslv3")
ctx.set_verify(M2Crypto.SSL.verify_peer |
M2Crypto.SSL.verify_fail_if_no_peer_cert, depth=9)
if ctx.load_verify_locations(ca_certs_file) != 1:
raise SeattleClearinghouseError("No CA certs found in file: " + ca_certs_file)
return M2CryptoSSLTransport(ctx)
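# Illustrative sketch (not part of the original module) of plugging the returned
# transport into an xmlrpclib proxy; the URL and CA bundle path are hypothetical:
#
#   transport = _get_ssl_transport('/etc/ssl/certs/ca-certificates.crt')
#   proxy = xmlrpclib.ServerProxy('https://clearinghouse.example.com/xmlrpc/',
#                                 transport=transport)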
class SeattleClearinghouseError(Exception):
"""Base class for exceptions raised by the SeattleClearinghouseClient."""
class CommunicationError(SeattleClearinghouseError):
"""
Indicates that XMLRPC communication failed.
"""
class InternalError(SeattleClearinghouseError):
"""
Indicates an unexpected error occurred, probably either a bug in this
client or a bug in SeattleClearinghouse.
"""
class AuthenticationError(SeattleClearinghouseError):
"""Indicates an authentication failure (invalid username and/or API key)."""
def __init__(self, msg=None):
if msg is None:
msg = "Authentication failed. Invalid username and/or API key."
SeattleClearinghouseError.__init__(self, msg)
class InvalidRequestError(SeattleClearinghouseError):
"""Indicates that the request is invalid."""
class NotEnoughCreditsError(SeattleClearinghouseError):
"""
Indicates that the requested operation requires more vessel credits to
be available than the account currently has.
"""
class UnableToAcquireResourcesError(SeattleClearinghouseError):
"""
Indicates that the requested operation failed because SeattleClearinghouse was unable
to acquire the requested resources.
"""
|
|
"""
Linear algebra
--------------
Linear equations
................
Basic linear algebra is implemented; you can for example solve the linear
equation system::
x + 2*y = -10
3*x + 4*y = 10
using ``lu_solve``::
>>> A = matrix([[1, 2], [3, 4]])
>>> b = matrix([-10, 10])
>>> x = lu_solve(A, b)
>>> x
matrix(
[['30.0'],
['-20.0']])
If you don't trust the result, use ``residual`` to calculate the residual A*x - b::
>>> residual(A, x, b)
matrix(
[['3.46944695195361e-18'],
['3.46944695195361e-18']])
>>> str(eps)
'2.22044604925031e-16'
As you can see, the solution is quite accurate. The error is caused by the
limited precision of the internal floating point arithmetic, and it is even
smaller than the current machine epsilon, which basically means you can trust
the result.
If you need more speed, use NumPy. Or choose a faster data type using the
keyword ``force_type``::
>>> lu_solve(A, b, force_type=float)
matrix(
[[29.999999999999996],
[-19.999999999999996]])
``lu_solve`` accepts overdetermined systems. It is usually not possible to solve
such systems exactly, so the residual is minimized instead. Internally this is done
using Cholesky decomposition to compute a least squares approximation, which means
that ``lu_solve`` will square the errors. If you can't afford this, use
``qr_solve`` instead. It is twice as slow but more accurate, and it calculates
the residual automatically.
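For instance, the same system can be solved with an explicit error estimate;
a minimal sketch (the second return value is the residual norm, whose exact
digits depend on the working precision)::
>>> x, error = qr_solve(A, b)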
Matrix factorization
....................
The function ``lu`` computes an explicit LU factorization of a matrix::
>>> P, L, U = lu(matrix([[0,2,3],[4,5,6],[7,8,9]]))
>>> print P
[0.0 0.0 1.0]
[1.0 0.0 0.0]
[0.0 1.0 0.0]
>>> print L
[ 1.0 0.0 0.0]
[ 0.0 1.0 0.0]
[0.571428571428571 0.214285714285714 1.0]
>>> print U
[7.0 8.0 9.0]
[0.0 2.0 3.0]
[0.0 0.0 0.214285714285714]
>>> print P.T*L*U
[0.0 2.0 3.0]
[4.0 5.0 6.0]
[7.0 8.0 9.0]
Interval matrices
-----------------
Matrices may contain interval elements. This allows one to perform
basic linear algebra operations such as matrix multiplication
and equation solving with rigorous error bounds::
>>> a = matrix([['0.1','0.3','1.0'],
... ['7.1','5.5','4.8'],
... ['3.2','4.4','5.6']], force_type=mpi)
>>>
>>> b = matrix(['4','0.6','0.5'], force_type=mpi)
>>> c = lu_solve(a, b)
>>> c
matrix(
[[[5.2582327113062393041, 5.2582327113062749951]],
[[-13.155049396267856583, -13.155049396267821167]],
[[7.4206915477497212555, 7.4206915477497310922]]])
>>> print a*c
[ [3.9999999999999866773, 4.0000000000000133227]]
[[0.59999999999972430942, 0.60000000000027142733]]
[[0.49999999999982236432, 0.50000000000018474111]]
"""
# TODO:
# *implement high-level qr()
# *test unitvector
# *iterative solving
from copy import copy
class LinearAlgebraMethods(object):
def LU_decomp(ctx, A, overwrite=False, use_cache=True):
"""
LU-factorization of an n*n matrix using the Gauss algorithm.
Returns L and U in one matrix and the pivot indices.
Use overwrite to specify whether A will be overwritten with L and U.
"""
if not A.rows == A.cols:
raise ValueError('need n*n matrix')
# get from cache if possible
if use_cache and isinstance(A, ctx.matrix) and A._LU:
return A._LU
if not overwrite:
orig = A
A = A.copy()
tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger
n = A.rows
p = [None]*(n - 1)
for j in xrange(n - 1):
# pivoting, choose max(abs(reciprocal row sum)*abs(pivot element))
biggest = 0
for k in xrange(j, n):
s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)])
if ctx.absmin(s) <= tol:
raise ZeroDivisionError('matrix is numerically singular')
current = 1/s * ctx.absmin(A[k,j])
if current > biggest: # TODO: what if equal?
biggest = current
p[j] = k
# swap rows according to p
ctx.swap_row(A, j, p[j])
if ctx.absmin(A[j,j]) <= tol:
raise ZeroDivisionError('matrix is numerically singular')
# calculate elimination factors and add rows
for i in xrange(j + 1, n):
A[i,j] /= A[j,j]
for k in xrange(j + 1, n):
A[i,k] -= A[i,j]*A[j,k]
if ctx.absmin(A[n - 1,n - 1]) <= tol:
raise ZeroDivisionError('matrix is numerically singular')
# cache decomposition
if not overwrite and isinstance(orig, ctx.matrix):
orig._LU = (A, p)
return A, p
def L_solve(ctx, L, b, p=None):
"""
Solve the lower part of an LU factorized matrix for y.
"""
assert L.rows == L.cols, 'need n*n matrix'
n = L.rows
assert len(b) == n
b = copy(b)
if p: # swap b according to p
for k in xrange(0, len(p)):
ctx.swap_row(b, k, p[k])
# solve
for i in xrange(1, n):
for j in xrange(i):
b[i] -= L[i,j] * b[j]
return b
def U_solve(ctx, U, y):
"""
Solve the upper part of an LU factorized matrix for x.
"""
assert U.rows == U.cols, 'need n*n matrix'
n = U.rows
assert len(y) == n
x = copy(y)
for i in xrange(n - 1, -1, -1):
for j in xrange(i + 1, n):
x[i] -= U[i,j] * x[j]
x[i] /= U[i,i]
return x
def lu_solve(ctx, A, b, **kwargs):
"""
Ax = b => x
Solve a determined or overdetermined linear equations system.
Fast LU decomposition is used, which is less accurate than QR decomposition
(especially for overdetermined systems), but it's twice as efficient.
Use qr_solve if you want more precision or have to solve a very ill-
conditioned system.
If you specify real=True, it does not check for overdetermined complex
systems.
"""
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A nor b
A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
if A.rows < A.cols:
raise ValueError('cannot solve underdetermined system')
if A.rows > A.cols:
# use least-squares method if overdetermined
# (this increases errors)
AH = A.H
A = AH * A
b = AH * b
if (kwargs.get('real', False) or
not sum(type(i) is ctx.mpc for i in A)):
# TODO: necessary to check also b?
x = ctx.cholesky_solve(A, b)
else:
x = ctx.lu_solve(A, b)
else:
# LU factorization
A, p = ctx.LU_decomp(A)
b = ctx.L_solve(A, b, p)
x = ctx.U_solve(A, b)
finally:
ctx.prec = prec
return x
def improve_solution(ctx, A, x, b, maxsteps=1):
"""
Improve a solution to a linear equation system iteratively.
This re-uses the LU decomposition and is thus cheap.
Usually 3 to 4 iterations give the maximal improvement.
"""
assert A.rows == A.cols, 'need n*n matrix' # TODO: really?
for _ in xrange(maxsteps):
r = ctx.residual(A, x, b)
if ctx.norm(r, 2) < 10*ctx.eps:
break
# this uses cached LU decomposition and is thus cheap
dx = ctx.lu_solve(A, -r)
x += dx
return x
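# A minimal usage sketch (not part of the original module): refine a solution
# from lu_solve with one or two residual-correction steps, e.g.
#     x = ctx.lu_solve(A, b)
#     x = ctx.improve_solution(A, x, b, maxsteps=2)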
def lu(ctx, A):
"""
A -> P, L, U
LU factorisation of a square matrix A. L is the lower, U the upper part.
P is the permutation matrix indicating the row swaps.
P*A = L*U
If you need efficiency, use the low-level method LU_decomp instead, it's
much more memory efficient.
"""
# get factorization
A, p = ctx.LU_decomp(A)
n = A.rows
L = ctx.matrix(n)
U = ctx.matrix(n)
for i in xrange(n):
for j in xrange(n):
if i > j:
L[i,j] = A[i,j]
elif i == j:
L[i,j] = 1
U[i,j] = A[i,j]
else:
U[i,j] = A[i,j]
# calculate permutation matrix
P = ctx.eye(n)
for k in xrange(len(p)):
ctx.swap_row(P, k, p[k])
return P, L, U
def unitvector(ctx, n, i):
"""
Return the i-th n-dimensional unit vector.
"""
assert 0 < i <= n, 'this unit vector does not exist'
return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i)
def inverse(ctx, A, **kwargs):
"""
Calculate the inverse of a matrix.
If you want to solve an equation system Ax = b, it's recommended to use
solve(A, b) instead; it's about 3 times more efficient.
"""
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A
A = ctx.matrix(A, **kwargs).copy()
n = A.rows
# get LU factorisation
A, p = ctx.LU_decomp(A)
cols = []
# calculate unit vectors and solve corresponding system to get columns
for i in xrange(1, n + 1):
e = ctx.unitvector(n, i)
y = ctx.L_solve(A, e, p)
cols.append(ctx.U_solve(A, y))
# convert columns to matrix
inv = []
for i in xrange(n):
row = []
for j in xrange(n):
row.append(cols[j][i])
inv.append(row)
result = ctx.matrix(inv, **kwargs)
finally:
ctx.prec = prec
return result
def householder(ctx, A):
"""
(A|b) -> H, p, x, res
(A|b) is the coefficient matrix augmented with the right-hand side of an
optionally overdetermined linear equation system.
H and p contain all information about the transformation matrices.
x is the solution, res the residual.
"""
assert isinstance(A, ctx.matrix)
m = A.rows
n = A.cols
assert m >= n - 1
# calculate Householder matrix
p = []
for j in xrange(0, n - 1):
s = ctx.fsum((A[i,j])**2 for i in xrange(j, m))
if not abs(s) > ctx.eps:
raise ValueError('matrix is numerically singular')
p.append(-ctx.sign(A[j,j]) * ctx.sqrt(s))
kappa = ctx.one / (s - p[j] * A[j,j])
A[j,j] -= p[j]
for k in xrange(j+1, n):
y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j, m)) * kappa
for i in xrange(j, m):
A[i,k] -= A[i,j] * y
# solve Rx = c1
x = [A[i,n - 1] for i in xrange(n - 1)]
for i in xrange(n - 2, -1, -1):
x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1))
x[i] /= p[i]
# calculate residual
if not m == n - 1:
r = [A[m-1-i, n-1] for i in xrange(m - n + 1)]
else:
# determined system, residual should be 0
r = [0]*m # maybe a bad idea, changing r[i] will change all elements
return A, p, x, r
#def qr(ctx, A):
# """
# A -> Q, R
#
# QR factorisation of a square matrix A using Householder decomposition.
# Q is orthogonal, this leads to very few numerical errors.
#
# A = Q*R
# """
# H, p, x, res = householder(A)
# TODO: implement this
def residual(ctx, A, x, b, **kwargs):
"""
Calculate the residual of a solution to a linear equation system.
r = A*x - b for A*x = b
"""
oldprec = ctx.prec
try:
ctx.prec *= 2
A, x, b = ctx.matrix(A, **kwargs), ctx.matrix(x, **kwargs), ctx.matrix(b, **kwargs)
return A*x - b
finally:
ctx.prec = oldprec
def qr_solve(ctx, A, b, norm=None, **kwargs):
"""
Ax = b => x, ||Ax - b||
Solve a determined or overdetermined linear equations system and
calculate the norm of the residual (error).
QR decomposition using Householder factorization is applied, which gives very
accurate results even for ill-conditioned matrices. qr_solve is about twice as
slow as lu_solve, but more accurate.
"""
if norm is None:
norm = ctx.norm
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A nor b
A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
if A.rows < A.cols:
raise ValueError('cannot solve underdetermined system')
H, p, x, r = ctx.householder(ctx.extend(A, b))
res = norm(r)
# calculate residual "manually" for determined systems
if res == 0:
res = norm(ctx.residual(A, x, b))
return ctx.matrix(x, **kwargs), res
finally:
ctx.prec = prec
# TODO: possible for complex matrices? -> have a look at GSL
def cholesky(ctx, A):
"""
Cholesky decomposition of a symmetric positive-definite matrix.
Can be used to solve linear equation systems about twice as efficiently as
LU decomposition, or to test whether A is positive-definite.
A = L * L.T
Only L (the lower part) is returned.
"""
assert isinstance(A, ctx.matrix)
if not A.rows == A.cols:
raise ValueError('need n*n matrix')
n = A.rows
L = ctx.matrix(n)
for j in xrange(n):
s = A[j,j] - ctx.fsum(L[j,k]**2 for k in xrange(j))
if s < ctx.eps:
raise ValueError('matrix not positive-definite')
L[j,j] = ctx.sqrt(s)
for i in xrange(j, n):
L[i,j] = (A[i,j] - ctx.fsum(L[i,k] * L[j,k] for k in xrange(j))) \
/ L[j,j]
return L
def cholesky_solve(ctx, A, b, **kwargs):
"""
Ax = b => x
Solve a symmetric positive-definite linear equation system.
This is twice as efficient as lu_solve.
Typical use cases:
* A.T*A
* Hessian matrix
* differential equations
"""
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A nor b
A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
if A.rows != A.cols:
raise ValueError('can only solve determined system')
# Cholesky factorization
L = ctx.cholesky(A)
# solve
n = L.rows
assert len(b) == n
for i in xrange(n):
b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i))
b[i] /= L[i,i]
x = ctx.U_solve(L.T, b)
return x
finally:
ctx.prec = prec
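# A minimal usage sketch (not part of the original module): for a real,
# full-rank overdetermined system, the normal equations A.T*A x = A.T*b listed
# among the use cases above can be solved directly, e.g.
#     x = ctx.cholesky_solve(A.T*A, A.T*b)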
def det(ctx, A):
"""
Calculate the determinant of a matrix.
"""
prec = ctx.prec
try:
# do not overwrite A
A = ctx.matrix(A).copy()
# use LU factorization to calculate determinant
try:
R, p = ctx.LU_decomp(A)
except ZeroDivisionError:
return 0
z = 1
for i, e in enumerate(p):
if i != e:
z *= -1
for i in xrange(A.rows):
z *= R[i,i]
return z
finally:
ctx.prec = prec
def cond(ctx, A, norm=None):
"""
Calculate the condition number of a matrix using a specified matrix norm.
The condition number estimates the sensitivity of a matrix to errors.
Example: small input errors for ill-conditioned coefficient matrices
alter the solution of the system dramatically.
For ill-conditioned matrices it's recommended to use qr_solve() instead
of lu_solve(). This does not help with input errors, however; it just avoids
adding additional errors.
Definition: cond(A) = ||A|| * ||A**-1||
"""
if norm is None:
norm = lambda x: ctx.mnorm(x,1)
return norm(A) * norm(ctx.inverse(A))
def lu_solve_mat(ctx, a, b):
"""Solve a * x = b where a and b are matrices."""
r = ctx.matrix(a.rows, b.cols)
for i in range(b.cols):
c = ctx.lu_solve(a, b.column(i))
for j in range(len(c)):
r[j, i] = c[j]
return r
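# A minimal usage sketch (not part of the original module): with the identity
# as the right-hand side, lu_solve_mat yields an approximate inverse, e.g.
#     X = ctx.lu_solve_mat(a, ctx.eye(a.rows))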
|
|
# Documented in http://zulip.readthedocs.io/en/latest/queuing.html
from __future__ import absolute_import
from typing import Any, Callable, Dict, List, Mapping, Optional
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import get_user_profile_by_email, \
get_user_profile_by_id, get_prereg_user_by_email, get_client, \
UserMessage, Message, Realm
from zerver.lib.context_managers import lockfile
from zerver.lib.error_notify import do_report_error
from zerver.lib.feedback import handle_feedback
from zerver.lib.queue import SimpleQueueClient, queue_json_publish
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.notifications import handle_missedmessage_emails, enqueue_welcome_emails, \
clear_followup_emails_queue, send_local_email_template_with_delay, \
send_missedmessage_email
from zerver.lib.push_notifications import handle_push_notification
from zerver.lib.actions import do_send_confirmation_email, \
do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
internal_send_message, check_send_message, extract_recipients, \
render_incoming_message, do_update_embedded_data
from zerver.lib.url_preview import preview as url_preview
from zerver.lib.digest import handle_digest_email
from zerver.lib.email_mirror import process_message as mirror_email
from zerver.decorator import JsonableError
from zerver.tornado.socket import req_redis_key
from confirmation.models import Confirmation
from zerver.lib.db import reset_queries
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.str_utils import force_str
from zerver.context_processors import common_context
import os
import sys
import six
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import requests
import simplejson
from six.moves import cStringIO as StringIO
class WorkerDeclarationException(Exception):
pass
def assign_queue(queue_name, enabled=True, queue_type="consumer"):
# type: (str, bool, Optional[str]) -> Callable[[QueueProcessingWorker], QueueProcessingWorker]
def decorate(clazz):
# type: (QueueProcessingWorker) -> QueueProcessingWorker
clazz.queue_name = queue_name
if enabled:
register_worker(queue_name, clazz, queue_type)
return clazz
return decorate
worker_classes = {} # type: Dict[str, Any] # Any here should be QueueProcessingWorker type
queues = {} # type: Dict[str, Dict[str, QueueProcessingWorker]]
def register_worker(queue_name, clazz, queue_type):
# type: (str, QueueProcessingWorker, str) -> None
if queue_type not in queues:
queues[queue_type] = {}
queues[queue_type][queue_name] = clazz
worker_classes[queue_name] = clazz
def get_worker(queue_name):
# type: (str) -> QueueProcessingWorker
return worker_classes[queue_name]()
def get_active_worker_queues(queue_type=None):
# type: (Optional[str]) -> List[str]
"""Returns all the non-test worker queues."""
if queue_type is None:
return list(worker_classes.keys())
return list(queues[queue_type].keys())
class QueueProcessingWorker(object):
queue_name = None # type: str
def __init__(self):
# type: () -> None
self.q = None # type: SimpleQueueClient
if self.queue_name is None:
raise WorkerDeclarationException("Queue worker declared without queue_name")
def consume(self, data):
# type: (Mapping[str, Any]) -> None
raise WorkerDeclarationException("No consumer defined!")
def consume_wrapper(self, data):
# type: (Mapping[str, Any]) -> None
try:
self.consume(data)
except Exception:
self._log_problem()
if not os.path.exists(settings.QUEUE_ERROR_DIR):
os.mkdir(settings.QUEUE_ERROR_DIR)
fname = '%s.errors' % (self.queue_name,)
fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
line = u'%s\t%s\n' % (time.asctime(), ujson.dumps(data))
lock_fn = fn + '.lock'
with lockfile(lock_fn):
with open(fn, 'ab') as f:
f.write(line.encode('utf-8'))
reset_queries()
def _log_problem(self):
# type: () -> None
logging.exception("Problem handling data on queue %s" % (self.queue_name,))
def setup(self):
# type: () -> None
self.q = SimpleQueueClient()
def start(self):
# type: () -> None
self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
self.q.start_consuming()
def stop(self):
# type: () -> None
self.q.stop_consuming()
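# Illustrative sketch (not part of the original module): a worker is declared
# with the assign_queue decorator and run via get_worker; the queue name
# 'example' is hypothetical.
#
#   @assign_queue('example')
#   class ExampleWorker(QueueProcessingWorker):
#       def consume(self, data):
#           # type: (Mapping[str, Any]) -> None
#           logging.info("handling %s" % (data,))
#
#   worker = get_worker('example')
#   worker.setup()
#   worker.start()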
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
# This should clear out any invitation reminder emails
clear_followup_emails_queue(data['email_address'])
if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
(settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
params = dict(data)
params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
params['status'] = 'subscribed'
r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
logging.warning("Attempted to sign up already existing email to list: %s" %
(data['email_address'],))
else:
r.raise_for_status()
enqueue_welcome_emails(data['email_address'], data['merge_fields']['NAME'])
@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
body = data["email_body"]
do_send_confirmation_email(invitee, referrer, body)
# queue invitation reminder for two days from now.
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=2),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
query = event["query"]
do_update_user_activity(user_profile, client, query, log_time)
@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
user_profile = get_user_profile_by_id(event["user_profile_id"])
log_time = timestamp_to_datetime(event["time"])
do_update_user_activity_interval(user_profile, log_time)
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Received event: %s" % (event),)
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
status = event["status"]
do_update_user_presence(user_profile, client, log_time, status)
@assign_queue('missedmessage_emails', queue_type="loop")
class MissedMessageWorker(QueueProcessingWorker):
def start(self):
# type: () -> None
while True:
missed_events = self.q.drain_queue("missedmessage_emails", json=True)
by_recipient = defaultdict(list) # type: Dict[int, List[Dict[str, Any]]]
for event in missed_events:
logging.info("Received event: %s" % (event,))
by_recipient[event['user_profile_id']].append(event)
for user_profile_id, events in by_recipient.items():
handle_missedmessage_emails(user_profile_id, events)
reset_queries()
# Aggregate all messages received every 2 minutes to let someone finish sending a batch
# of messages
time.sleep(2 * 60)
@assign_queue('missedmessage_email_senders')
class MissedMessageSendingWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
send_missedmessage_email(data)
@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
handle_push_notification(data['user_profile_id'], data)
def make_feedback_client():
# type: () -> Any # Should be zulip.Client, but not necessarily importable
sys.path.append(os.path.join(os.path.dirname(__file__), '../../api'))
import zulip
return zulip.Client(
client="ZulipFeedback/0.1",
email=settings.DEPLOYMENT_ROLE_NAME,
api_key=settings.DEPLOYMENT_ROLE_KEY,
verbose=True,
site=settings.FEEDBACK_TARGET)
# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Received feedback from %s" % (event["sender_email"],))
handle_feedback(event)
@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
def start(self):
# type: () -> None
if settings.DEPLOYMENT_ROLE_KEY:
self.staging_client = make_feedback_client()
self.staging_client._register(
'forward_error',
method='POST',
url='deployments/report_error',
make_request=(lambda type, report: {'type': type, 'report': simplejson.dumps(report)}),
)
QueueProcessingWorker.start(self)
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email')))
if settings.DEPLOYMENT_ROLE_KEY:
self.staging_client.forward_error(event['type'], event['report'])
elif settings.ERROR_REPORTING:
do_report_error(event['report']['host'], event['type'], event['report'])
@assign_queue('slow_queries', queue_type="loop")
class SlowQueryWorker(QueueProcessingWorker):
def start(self):
# type: () -> None
while True:
self.process_one_batch()
# Aggregate all slow query messages in 1-minute chunks to avoid message spam
time.sleep(1 * 60)
def process_one_batch(self):
# type: () -> None
slow_queries = self.q.drain_queue("slow_queries", json=True)
if settings.ERROR_BOT is None:
return
if len(slow_queries) > 0:
topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)
content = ""
for query in slow_queries:
content += " %s\n" % (query,)
error_bot_realm = get_user_profile_by_email(settings.ERROR_BOT).realm
internal_send_message(error_bot_realm, settings.ERROR_BOT,
"stream", "logs", topic, content)
reset_queries()
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(MessageSenderWorker, self).__init__()
self.redis_client = get_redis_client()
self.handler = BaseHandler()
self.handler.load_middleware()
def consume(self, event):
# type: (Mapping[str, Any]) -> None
server_meta = event['server_meta']
environ = {
'REQUEST_METHOD': 'SOCKET',
'SCRIPT_NAME': '',
'PATH_INFO': '/json/messages',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': 9993,
'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
'wsgi.version': (1, 0),
'wsgi.input': StringIO(),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': True,
'wsgi.run_once': False,
'zulip.emulated_method': 'POST'
}
if 'socket_user_agent' in event['request']:
environ['HTTP_USER_AGENT'] = event['request']['socket_user_agent']
del event['request']['socket_user_agent']
# We're mostly using a WSGIRequest for convenience
environ.update(server_meta['request_environ'])
request = WSGIRequest(environ)
# Note: If we ever support non-POST methods, we'll need to change this.
request._post = event['request']
request.csrf_processing_done = True
user_profile = get_user_profile_by_id(server_meta['user_id'])
request._cached_user = user_profile
resp = self.handler.get_response(request)
server_meta['time_request_finished'] = time.time()
server_meta['worker_log_data'] = request._log_data
resp_content = resp.content.decode('utf-8')
result = {'response': ujson.loads(resp_content), 'req_id': event['req_id'],
'server_meta': server_meta}
redis_key = req_redis_key(event['req_id'])
self.redis_client.hmset(redis_key, {'status': 'complete',
'response': resp_content})
queue_json_publish(server_meta['return_queue'], result, lambda e: None)
@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker):
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Received digest event: %s" % (event,))
handle_digest_email(event["user_profile_id"], event["cutoff"])
@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
# Processes emails sent to the Zulip email gateway (the email mirror) by
# handing each one to the email_mirror message processor.
def consume(self, event):
# type: (Mapping[str, Any]) -> None
message = force_str(event["message"])
mirror_email(email.message_from_string(message),
rcpt_to=event["rcpt_to"], pre_checked=True)
@assign_queue('test', queue_type="test")
class TestWorker(QueueProcessingWorker):
# This worker allows you to test the queue worker infrastructure without
# creating significant side effects. It can be useful in development or
# for troubleshooting prod/staging. It pulls a message off the test queue
# and appends it to a file in /tmp.
def consume(self, event):
# type: (Mapping[str, Any]) -> None
fn = settings.ZULIP_WORKER_TEST_FILE
message = ujson.dumps(event)
logging.info("TestWorker should append this message to %s: %s" % (fn, message))
with open(fn, 'a') as f:
f.write(message + '\n')
@assign_queue('embed_links')
class FetchLinksEmbedData(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
for url in event['urls']:
url_preview.get_link_embed_data(url)
message = Message.objects.get(id=event['message_id'])
# If the message changed, we will run this task after updating the message
# in zerver.views.messages.update_message_backend
if message.content != event['message_content']:
return
if message.content is not None:
ums = UserMessage.objects.filter(
message=message.id).select_related("user_profile")
message_users = {um.user_profile for um in ums}
# Fetch the realm whose settings we're using for rendering
realm = Realm.objects.get(id=event['message_realm_id'])
# If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(
message,
message.content,
message_users,
realm)
do_update_embedded_data(
message.sender, message, message.content, rendered_content)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'bip91.py',
'fork-large-block.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
|
""" test the label propagation module """
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn.semi_supervised import _label_propagation as label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {"kernel": "rbf"}),
(label_propagation.LabelPropagation, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelPropagation,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
(label_propagation.LabelSpreading, {"kernel": "rbf"}),
(label_propagation.LabelSpreading, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelSpreading,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
]
def test_fit_transduction():
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert clf.transduction_[2] == 1
def test_distribution():
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters["kernel"] == "knn":
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(
clf.predict_proba([[1.0, 0.0]]), np.array([[1.0, 0.0]]), 2
)
else:
assert_array_almost_equal(
np.asarray(clf.label_distributions_[2]), np.array([0.5, 0.5]), 2
)
def test_predict():
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(
clf.predict_proba([[1.0, 1.0]]), np.array([[0.5, 0.5]])
)
def test_label_spreading_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
clf = label_propagation.LabelSpreading().fit(X, y)
# adopting notation from Zhou et al (2004):
S = clf._build_graph()
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
Y = Y[:, :-1]
for alpha in [0.1, 0.3, 0.5, 0.7, 0.9]:
expected = np.dot(np.linalg.inv(np.eye(len(S)) - alpha * S), Y)
expected /= expected.sum(axis=1)[:, np.newaxis]
clf = label_propagation.LabelSpreading(max_iter=10000, alpha=alpha)
clf.fit(X, y)
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_label_propagation_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
T_bar = clf._build_graph()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_valid_alpha():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
for alpha in [-0.1, 0, 1, 1.1, None]:
with pytest.raises(ValueError):
label_propagation.LabelSpreading(alpha=alpha).fit(X, y)
def test_convergence_speed():
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=5000)
mdl.fit(X, y)
# this should converge quickly:
assert mdl.n_iter_ < 10
assert_array_equal(mdl.predict(X), [0, 1, 1])
def test_convergence_warning():
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=1)
warn_msg = "max_iter=1 was reached without convergence."
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=1)
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=500)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=500)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
@pytest.mark.parametrize(
"LabelPropagationCls",
[label_propagation.LabelSpreading, label_propagation.LabelPropagation],
)
def test_label_propagation_non_zero_normalizer(LabelPropagationCls):
# check that we don't divide by zero in case of null normalizer
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/pull/15946
# https://github.com/scikit-learn/scikit-learn/issues/9292
X = np.array([[100.0, 100.0], [100.0, 100.0], [0.0, 0.0], [0.0, 0.0]])
y = np.array([0, 1, -1, -1])
mdl = LabelPropagationCls(kernel="knn", max_iter=100, n_neighbors=1)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
def test_predict_sparse_callable_kernel():
# This is a non-regression test for #15866
# Custom sparse kernel (top-K RBF)
def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5):
nn = NearestNeighbors(n_neighbors=n_neighbors, metric="euclidean", n_jobs=2)
nn.fit(X)
W = -1 * nn.kneighbors_graph(Y, mode="distance").power(2) * gamma
np.exp(W.data, out=W.data)
assert issparse(W)
return W.T
n_classes = 4
n_samples = 500
n_test = 10
X, y = make_classification(
n_classes=n_classes,
n_samples=n_samples,
n_features=20,
n_informative=20,
n_redundant=0,
n_repeated=0,
random_state=0,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=0
)
model = label_propagation.LabelSpreading(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
model = label_propagation.LabelPropagation(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
|
|
import os
import sys
import pytest
from tests.lib.path import Path
COMPLETION_FOR_SUPPORTED_SHELLS_TESTS = (
('bash', """\
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
}
complete -o default -F _pip_completion pip"""),
('fish', """\
function __fish_complete_pip
set -lx COMP_WORDS (commandline -o) ""
set -lx COMP_CWORD ( \\
math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
)
set -lx PIP_AUTO_COMPLETE 1
string split \\ -- (eval $COMP_WORDS[1])
end
complete -fa "(__fish_complete_pip)" -c pip"""),
('zsh', """\
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
}
compctl -K _pip_completion pip"""),
)
@pytest.fixture(scope="session")
def script_with_launchers(
tmpdir_factory, script_factory, common_wheels, pip_src
):
tmpdir = Path(str(tmpdir_factory.mktemp("script_with_launchers")))
script = script_factory(tmpdir.joinpath("workspace"))
# Re-install pip so we get the launchers.
script.pip_install_local('-f', common_wheels, pip_src)
return script
@pytest.mark.parametrize(
'shell, completion',
COMPLETION_FOR_SUPPORTED_SHELLS_TESTS,
ids=[t[0] for t in COMPLETION_FOR_SUPPORTED_SHELLS_TESTS],
)
def test_completion_for_supported_shells(
script_with_launchers, shell, completion
):
"""
Test getting completion for each supported shell
"""
result = script_with_launchers.pip(
'completion', '--' + shell, use_module=False
)
assert completion in result.stdout, str(result.stdout)
@pytest.fixture(scope="session")
def autocomplete_script(tmpdir_factory, script_factory):
tmpdir = Path(str(tmpdir_factory.mktemp("autocomplete_script")))
return script_factory(tmpdir.joinpath("workspace"))
@pytest.fixture
def autocomplete(autocomplete_script, monkeypatch):
monkeypatch.setattr(autocomplete_script, 'environ', os.environ.copy())
autocomplete_script.environ['PIP_AUTO_COMPLETE'] = '1'
def do_autocomplete(words, cword, cwd=None):
autocomplete_script.environ['COMP_WORDS'] = words
autocomplete_script.environ['COMP_CWORD'] = cword
result = autocomplete_script.run(
'python', '-c',
'from pip._internal.cli.autocompletion import autocomplete;'
'autocomplete()',
expect_error=True,
cwd=cwd,
)
return result, autocomplete_script
return do_autocomplete
def test_completion_for_unknown_shell(autocomplete_script):
"""
Test getting completion for an unknown shell
"""
error_msg = 'no such option: --myfooshell'
result = autocomplete_script.pip(
'completion', '--myfooshell', expect_error=True
)
assert error_msg in result.stderr, 'tests for an unknown shell failed'
def test_completion_alone(autocomplete_script):
"""
Test getting completion for none shell, just pip completion
"""
result = autocomplete_script.pip('completion', allow_stderr_error=True)
assert 'ERROR: You must pass --bash or --fish or --zsh' in result.stderr, \
'completion alone failed -- ' + result.stderr
def test_completion_for_un_snippet(autocomplete):
"""
Test that completion for ``un`` returns uninstall
"""
res, env = autocomplete('pip un', '1')
assert res.stdout.strip().split() == ['uninstall'], res.stdout
def test_completion_for_default_parameters(autocomplete):
"""
Test that completion for ``--`` contains --help
"""
res, env = autocomplete('pip --', '1')
assert '--help' in res.stdout,\
"autocomplete function could not complete ``--``"
def test_completion_option_for_command(autocomplete):
"""
Test getting completion for ``--`` in command (e.g. ``pip search --``)
"""
res, env = autocomplete('pip search --', '2')
assert '--help' in res.stdout,\
"autocomplete function could not complete ``--``"
def test_completion_short_option(autocomplete):
"""
Test getting completion for short options after ``-`` (e.g. ``pip -``)
"""
res, env = autocomplete('pip -', '1')
assert '-h' in res.stdout.split(),\
"autocomplete function could not complete short options after ``-``"
def test_completion_short_option_for_command(autocomplete):
"""
Test getting completion for short options after ``-`` in command
(e.g. ``pip search -``)
"""
res, env = autocomplete('pip search -', '2')
assert '-h' in res.stdout.split(),\
"autocomplete function could not complete short options after ``-``"
def test_completion_files_after_option(autocomplete, data):
"""
Test getting completion for <file> or <dir> after options in command
(e.g. ``pip install -r``)
"""
res, env = autocomplete(
words=('pip install -r r'),
cword='3',
cwd=data.completion_paths,
)
assert 'requirements.txt' in res.stdout, (
"autocomplete function could not complete <file> "
"after options in command"
)
assert os.path.join('resources', '') in res.stdout, (
"autocomplete function could not complete <dir> "
"after options in command"
)
assert not any(out in res.stdout for out in
(os.path.join('REPLAY', ''), 'README.txt')), (
"autocomplete function completed <file> or <dir> that "
"should not be completed"
)
if sys.platform != 'win32':
return
assert 'readme.txt' in res.stdout, (
"autocomplete function could not complete <file> "
"after options in command"
)
assert os.path.join('replay', '') in res.stdout, (
"autocomplete function could not complete <dir> "
"after options in command"
)
def test_completion_not_files_after_option(autocomplete, data):
"""
Test that file completions are not offered where they are not applicable
(e.g. ``pip install``)
"""
res, env = autocomplete(
words=('pip install r'),
cword='2',
cwd=data.completion_paths,
)
assert not any(out in res.stdout for out in
('requirements.txt', 'readme.txt',)), (
"autocomplete function completed <file> when "
"it should not complete"
)
assert not any(os.path.join(out, '') in res.stdout
for out in ('replay', 'resources')), (
"autocomplete function completed <dir> when "
"it should not complete"
)
@pytest.mark.parametrize("cl_opts", ["-U", "--user", "-h"])
def test_completion_not_files_after_nonexpecting_option(
autocomplete, data, cl_opts
):
"""
Test that file completions are not offered after options that do not
expect them (e.g. ``pip install -U``)
"""
res, env = autocomplete(
words=('pip install %s r' % cl_opts),
cword='2',
cwd=data.completion_paths,
)
assert not any(out in res.stdout for out in
('requirements.txt', 'readme.txt',)), (
"autocomplete function completed <file> when "
"it should not complete"
)
assert not any(os.path.join(out, '') in res.stdout
for out in ('replay', 'resources')), (
"autocomplete function completed <dir> when "
"it should not complete"
)
def test_completion_directories_after_option(autocomplete, data):
"""
Test getting completion <dir> after options in command
(e.g. ``pip --cache-dir``)
"""
res, env = autocomplete(
words=('pip --cache-dir r'),
cword='2',
cwd=data.completion_paths,
)
assert os.path.join('resources', '') in res.stdout, (
"autocomplete function could not complete <dir> after options"
)
assert not any(out in res.stdout for out in (
'requirements.txt', 'README.txt', os.path.join('REPLAY', ''))), (
"autocomplete function completed <dir> when "
"it should not complete"
)
if sys.platform == 'win32':
assert os.path.join('replay', '') in res.stdout, (
"autocomplete function could not complete <dir> after options"
)
def test_completion_subdirectories_after_option(autocomplete, data):
"""
Test getting completion <dir> after options in command
given path of a directory
"""
res, env = autocomplete(
words=('pip --cache-dir ' + os.path.join('resources', '')),
cword='2',
cwd=data.completion_paths,
)
assert os.path.join('resources',
os.path.join('images', '')) in res.stdout, (
"autocomplete function could not complete <dir> "
"given path of a directory after options"
)
def test_completion_path_after_option(autocomplete, data):
"""
Test getting completion <path> after options in command
given absolute path
"""
res, env = autocomplete(
words=('pip install -e ' + os.path.join(data.completion_paths, 'R')),
cword='3',
)
assert all(os.path.normcase(os.path.join(data.completion_paths, out))
in res.stdout for out in (
'README.txt', os.path.join('REPLAY', ''))), (
"autocomplete function could not complete <path> "
"after options in command given absolute path"
)
@pytest.mark.parametrize('flag', ['--bash', '--zsh', '--fish'])
def test_completion_uses_same_executable_name(
autocomplete_script, flag, deprecated_python
):
executable_name = 'pip{}'.format(sys.version_info[0])
# Deprecated python versions produce an extra deprecation warning
result = autocomplete_script.run(
executable_name, 'completion', flag, expect_stderr=deprecated_python,
)
assert executable_name in result.stdout
|
|
"""
CSCI-603: Lab 3 (week 3)
Section 03
Authors: Pavan Prabahakar Bhat (pxb8715@rit.edu)
         Vinayak Marali (vkm7895@rit.edu)
This program draws a tree using recursion.
"""
#IMPORTS CALLED HERE
import turtle
import random
#validation for recursion depth
#flag = True
#while(flag):
recursionDepth = input("Please enter a value for the depth of recursion ")
if(recursionDepth.isnumeric() and int(recursionDepth) > 0):
# A COPY OF THE RECURSION DEPTH
recursionDepth = int(recursionDepth)
layers = recursionDepth
# flag = False
else:
print("Please enter a valid integer number above zero!")
# will exit from here as all the validations have failed
exit()
BRANCH_ANGLE = random.randint(20, 40)  # random angle (in degrees) between two branches
#validation for overallSize value
# flag1 = True
# while(flag1):
overallSize = input("Please enter the value of the overall expected height of the tree ")
if(overallSize.isnumeric() and int(overallSize) > 0):
overallSize = int(overallSize)
# flag1 = False
else:
print("Please enter a valid positive integer!")
exit()
# the trunk is half the overall height, so the finished tree is approximately
# the requested height
size = overallSize/2  # used as the length of the trunk of the tree
# a copy of the size of trunk
trunkLength = size
#validation for bushiness value
bushiness = input("Please enter a floating point number between 0 and 1 to determine the bushiness of the tree: ")
dotCount = 0 #Count of the number of decimal point
numCount = 0 #Count of the numbers in the input string
notNum = 0 #Count of the non numeric characters in the input string
numPos = 0 #Index of last number in the input string
decimalPos = 0 #Index of decimal point in the input string
for inputCharacter in bushiness:
if bushiness[:2] == "0." or bushiness[:2] == "1.":
if inputCharacter >= "0" and inputCharacter <= "9":
numCount = numCount +1
numPos = bushiness.index(inputCharacter)
elif inputCharacter == ".":
dotCount = dotCount + 1
decimalPos = bushiness.index(inputCharacter)
else:
notNum = notNum + 1
if notNum > 0:
print("Enter a valid float number between 0 to 1 eg: 0.5")
exit()
elif dotCount == 1 and numPos >= decimalPos :
if numCount == len(bushiness) -1:
bushiness = float(bushiness)
elif dotCount == 0 and numPos >= decimalPos:
if numCount == len(bushiness):
bushiness = float(bushiness)
else:
print("Enter a valid float number between 0 to 1 eg: 0.5")
exit()
if(bushiness):
# randomly generates the number of sub-branches depending on the bushiness value
noOfSegments = random.randint(1, max(1, int(10*bushiness)))  # at least 1 so randint() gets a valid range
else:
noOfSegments = 0
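# NOTE: the character-by-character checks above can be written far more
# compactly with float(). The helper below is only an illustrative sketch and
# is not called anywhere in this program.
def parseUnitInterval(text):
    """Return float(text) if it parses and lies in [0, 1], otherwise None."""
    try:
        value = float(text)
    except ValueError:
        return None
    return value if 0 <= value <= 1 else None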
#validation for leafiness value
leafiness = input("Please enter a floating point number between 0 and 1 to determine the leafiness of the tree: ")
dotCount = 0 #Count of the number of decimal point
numCount = 0 #Count of the numbers in the input string
notNum = 0 #Count of the non numeric characters in the input string
numPos = 0 #Index of last number in the input string
decimalPos = 0 #Index of decimal point in the input string
for inputCharacter in leafiness:
if leafiness[:2] == "0." or leafiness[:2]=="1.":
if inputCharacter >="0" and inputCharacter <="9":
numCount = numCount +1
numPos = leafiness.index(inputCharacter)
elif inputCharacter==".":
dotCount = dotCount + 1
decimalPos = leafiness.index(inputCharacter)
else:
notNum = notNum + 1
if notNum > 0:
print("Enter a valid float number between 0 to 1 eg: 0.5")
exit()
elif dotCount == 1 and numPos>=decimalPos :
if numCount == len(leafiness) -1:
leafiness = float(leafiness)
elif dotCount == 0 and numPos>=decimalPos:
if numCount == len(leafiness):
leafiness = float(leafiness)
else:
print("Enter a valid float number between 0 to 1 eg: 0.5")
exit()
if(leafiness):
# randomly generates the number of leaves depending on the leafiness value
noOfLeafs = random.randint(1, max(1, int(50*leafiness)))  # at least 1 so randint() gets a valid range
else:
noOfLeafs = 0
def drawTree( aTurtle, recursionDepth, size ):
""" Recursively draw a tree.
:param aTurtle: the turtle to be used to do the drawing
:param recursionDepth: non-negative integer
remaining depth of recursion (0 draws the leaves)
:param size: positive integer
length of tree trunk
:pre: turtle is at base of tree,
turtle is facing along trunk of tree,
turtle is pen-down.
:post: turtle is at base of tree,
turtle is facing along trunk of tree,
turtle is pen-down.
"""
aTurtle.pendown()
if recursionDepth == 0:
# base case: draws the leaves on the tree
leafLength = trunkLength / 10
for _ in range(noOfLeafs):
aTurtle.color("Green")
aTurtle.fd(leafLength)
aTurtle.backward(leafLength)
aTurtle.rt(3)
aTurtle.color("Black")
aTurtle.lt(3*noOfLeafs)  # undo the accumulated right turns so the turtle faces along the twig again
elif(recursionDepth < layers):
# recursive case: draws the trunk and the sub-branches
aTurtle.forward( size )
aTurtle.left( BRANCH_ANGLE )
for _ in range(noOfSegments):
segmentPosition = random.randint(0,round(size/2)) # chooses a random position to place the sub-branch
aTurtle.fd(segmentPosition)
direction = random.randint(0,1) # randomizes the direction in which the sub-branch is drawn
if(direction == 0):
aTurtle.lt(BRANCH_ANGLE)
else:
aTurtle.rt(BRANCH_ANGLE)
aTurtle.fd(size/4)  # draw the sub-branch at a quarter of the current branch length
aTurtle.backward( size/4 )
# required to come back to a relative position from where the branch began
if(direction == 0):
aTurtle.rt(BRANCH_ANGLE)
else:
aTurtle.lt(BRANCH_ANGLE)
aTurtle.backward(segmentPosition)
# recurse to draw the branch on the left side
drawTree( aTurtle, recursionDepth - 1, size / 2 )
aTurtle.right( 2*BRANCH_ANGLE )
for _ in range(noOfSegments):
segmentPosition = random.randint(0,round(size/2))
aTurtle.fd(segmentPosition)
direction = random.randint(0,1)
if(direction == 0):
aTurtle.lt(BRANCH_ANGLE)
else:
aTurtle.rt(BRANCH_ANGLE)
aTurtle.fd(size/4)
aTurtle.backward( size/4 )
if(direction == 0):
aTurtle.rt(BRANCH_ANGLE)
else:
aTurtle.lt(BRANCH_ANGLE)
aTurtle.backward(segmentPosition)
drawTree( aTurtle, recursionDepth - 1, size / 2 )
aTurtle.left( BRANCH_ANGLE )
aTurtle.backward( size )
elif(recursionDepth == layers):
# recursive case: Draws the trunk and the sub-branches
aTurtle.forward( size )
# generates a random number of branches between 1 and 4
randomBranch = random.randint(1,4)
#required to generate random branch angles
randomFactor = random.randint(1,2)
aTurtle.rt(2 * BRANCH_ANGLE)
while randomBranch > 0:
aTurtle.left( BRANCH_ANGLE )
drawTree( aTurtle, recursionDepth - 1, size / 2 )
aTurtle.right(randomFactor * BRANCH_ANGLE )
drawTree( aTurtle, recursionDepth - 1, size / 2 )
aTurtle.left( BRANCH_ANGLE )
randomBranch = randomBranch -1
aTurtle.setheading(90)
aTurtle.backward( trunkLength )
MARGIN = 2 # Space at edges of canvas
PEN_SIZE = 2 # Thickness of turtle's pen
def initWorld( size ):
""" Initialize the drawing area.
:param size: integer
length of tree trunk to draw
(used to scale the drawing area)
:pre: size > 0
:post: coordinate system goes from
(-2*size, -2*size) at lower-left
to (2*size, 2*size) at upper-right.
"""
turtle.setup( 600, 600 )
# Scale the world coordinates with the requested trunk size so the
# whole tree fits in the window regardless of the size parameter.
turtle.setworldcoordinates( -2*size - MARGIN, -2*size - MARGIN, \
2*size + MARGIN, 2*size + MARGIN)
def initTurtle( aTurtle ):
""" Set up the turtle by establishing the drawTree
function's pre-conditions.
:post: aTurtle is at origin ( center ),
aTurtle is facing North,
aTurtle's pen is down, size PEN_SIZE, aTurtle's speed is set to 0
"""
aTurtle.home() # turtle is at origin, facing east, pen-down
aTurtle.left( 90 ) # turtle is facing North
aTurtle.down() # turtle's pen is put down
aTurtle.pensize( PEN_SIZE )
aTurtle.speed(0)
def main():
""" Print a message, initialize the world,
draw an instance of the recursive tree, and wait for ENTER
t: turtle
recursionDepth: depth of recursion
size: positive integer
length of tree trunk
"""
print("Drawing recursive tree with", (recursionDepth, size))
initWorld( size )
t = turtle.Turtle()
initTurtle( t )
drawTree( t, recursionDepth, size )
input("Hit enter to exit...")
# calls the main function
if __name__ == '__main__':
main()
|
|
from core.himesis import Himesis
import uuid
class HState2HProcDef(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule State2HProcDef.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HState2HProcDef, self).__init__(name='HState2HProcDef', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """State2HProcDef"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State2HProcDef')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """State2HProcDef"""
# match class State() node
self.add_node()
self.vs[3]["mm__"] = """State"""
self.vs[3]["attr1"] = """+"""
# apply class LocalDef() node
self.add_node()
self.vs[4]["mm__"] = """LocalDef"""
self.vs[4]["attr1"] = """1"""
# apply class ProcDef() node
self.add_node()
self.vs[5]["mm__"] = """ProcDef"""
self.vs[5]["attr1"] = """1"""
# apply class Name() node
self.add_node()
self.vs[6]["mm__"] = """Name"""
self.vs[6]["attr1"] = """1"""
# apply class Name() node
self.add_node()
self.vs[7]["mm__"] = """Name"""
self.vs[7]["attr1"] = """1"""
# apply class Name() node
self.add_node()
self.vs[8]["mm__"] = """Name"""
self.vs[8]["attr1"] = """1"""
# apply class Listen() node
self.add_node()
self.vs[9]["mm__"] = """Listen"""
self.vs[9]["attr1"] = """1"""
# apply class ListenBranch() node
self.add_node()
self.vs[10]["mm__"] = """ListenBranch"""
self.vs[10]["attr1"] = """1"""
# apply class Null() node
self.add_node()
self.vs[11]["mm__"] = """Null"""
self.vs[11]["attr1"] = """1"""
# apply class ListenBranch() node
self.add_node()
self.vs[12]["mm__"] = """ListenBranch"""
self.vs[12]["attr1"] = """1"""
# apply class Seq() node
self.add_node()
self.vs[13]["mm__"] = """Seq"""
self.vs[13]["attr1"] = """1"""
# apply class Trigger() node
self.add_node()
self.vs[14]["mm__"] = """Trigger"""
self.vs[14]["attr1"] = """1"""
# apply class Listen() node
self.add_node()
self.vs[15]["mm__"] = """Listen"""
self.vs[15]["attr1"] = """1"""
# apply class ListenBranch() node
self.add_node()
self.vs[16]["mm__"] = """ListenBranch"""
self.vs[16]["attr1"] = """1"""
# apply class Trigger() node
self.add_node()
self.vs[17]["mm__"] = """Trigger"""
self.vs[17]["attr1"] = """1"""
# apply association LocalDef--def-->ProcDef node
self.add_node()
self.vs[18]["attr1"] = """def"""
self.vs[18]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[19]["attr1"] = """channelNames"""
self.vs[19]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[20]["attr1"] = """channelNames"""
self.vs[20]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[21]["attr1"] = """channelNames"""
self.vs[21]["mm__"] = """directLink_T"""
# apply association ProcDef--p-->Listen node
self.add_node()
self.vs[22]["attr1"] = """p"""
self.vs[22]["mm__"] = """directLink_T"""
# apply association Listen--branches-->ListenBranch node
self.add_node()
self.vs[23]["attr1"] = """branches"""
self.vs[23]["mm__"] = """directLink_T"""
# apply association ListenBranch--p-->Null node
self.add_node()
self.vs[24]["attr1"] = """p"""
self.vs[24]["mm__"] = """directLink_T"""
# apply association Listen--branches-->ListenBranch node
self.add_node()
self.vs[25]["attr1"] = """branches"""
self.vs[25]["mm__"] = """directLink_T"""
# apply association ListenBranch--p-->Seq node
self.add_node()
self.vs[26]["attr1"] = """p"""
self.vs[26]["mm__"] = """directLink_T"""
# apply association Seq--p-->Trigger node
self.add_node()
self.vs[27]["attr1"] = """p"""
self.vs[27]["mm__"] = """directLink_T"""
# apply association Seq--p-->Listen node
self.add_node()
self.vs[28]["attr1"] = """p"""
self.vs[28]["mm__"] = """directLink_T"""
# apply association Listen--branches-->ListenBranch node
self.add_node()
self.vs[29]["attr1"] = """branches"""
self.vs[29]["mm__"] = """directLink_T"""
# apply association ListenBranch--p-->Trigger node
self.add_node()
self.vs[30]["attr1"] = """p"""
self.vs[30]["mm__"] = """directLink_T"""
# backward association State---->LocalDef node
self.add_node()
self.vs[31]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,3), # matchmodel -> match_class State()
(1,4), # applymodel -> apply_class LocalDef()
(1,5), # applymodel -> apply_class ProcDef()
(1,6), # applymodel -> apply_class Name()
(1,7), # applymodel -> apply_class Name()
(1,8), # applymodel -> apply_class Name()
(1,9), # applymodel -> apply_class Listen()
(1,10), # applymodel -> apply_class ListenBranch()
(1,11), # applymodel -> apply_class Null()
(1,12), # applymodel -> apply_class ListenBranch()
(1,13), # applymodel -> apply_class Seq()
(1,14), # applymodel -> apply_class Trigger()
(1,15), # applymodel -> apply_class Listen()
(1,16), # applymodel -> apply_class ListenBranch()
(1,17), # applymodel -> apply_class Trigger()
(4,18), # apply_class LocalDef() -> association def
(18,5), # association def -> apply_class ProcDef()
(5,19), # apply_class ProcDef() -> association channelNames
(19,6), # association channelNames -> apply_class Name()
(5,20), # apply_class ProcDef() -> association channelNames
(20,7), # association channelNames -> apply_class Name()
(5,21), # apply_class ProcDef() -> association channelNames
(21,8), # association channelNames -> apply_class Name()
(5,22), # apply_class ProcDef() -> association p
(22,9), # association p -> apply_class Listen()
(9,23), # apply_class Listen() -> association branches
(23,10), # association branches -> apply_class ListenBranch()
(10,24), # apply_class ListenBranch() -> association p
(24,11), # association p -> apply_class Null()
(9,25), # apply_class Listen() -> association branches
(25,12), # association branches -> apply_class ListenBranch()
(12,26), # apply_class ListenBranch() -> association p
(26,13), # association p -> apply_class Seq()
(13,27), # apply_class Seq() -> association p
(27,14), # association p -> apply_class Trigger()
(13,28), # apply_class Seq() -> association p
(28,15), # association p -> apply_class Listen()
(15,29), # apply_class Listen() -> association branches
(29,16), # association branches -> apply_class ListenBranch()
(16,30), # apply_class ListenBranch() -> association p
(30,17), # association p -> apply_class Trigger()
(4,31), # apply_class LocalDef() -> backward_association
(31,3), # backward_association -> match_class State()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((3,'isComposite'),('constant','true')), ((5,'name'),('constant','H')), ((6,'literal'),('constant','exit_in')), ((7,'literal'),('constant','exack_in')), ((8,'literal'),('constant','sh_in')), ((10,'channel'),('constant','sh_in')), ((12,'channel'),('constant','exit')), ((14,'channel'),('constant','exit_in')), ((16,'channel'),('constant','exack_in')), ((17,'channel'),('constant','exack')), ]
|
|
# -*- coding: utf-8 -*-
"""
eve.methods.put
~~~~~~~~~~~~~~~
This module implements the PUT method.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import exceptions
from datetime import datetime
from eve.auth import requires_auth
from eve.defaults import resolve_default_values
from eve.validation import ValidationError
from flask import current_app as app, abort
from eve.utils import config, debug_error_message, parse_request
from eve.methods.common import get_document, parse, payload as payload_, \
ratelimit, pre_event, store_media_files, resolve_user_restricted_access, \
resolve_embedded_fields, build_response_document, marshal_write_response, \
resolve_sub_resource_path, resolve_document_etag, oplog_push
from eve.versioning import resolve_document_version, \
insert_versioning_documents, late_versioning_catch
@ratelimit()
@requires_auth('item')
@pre_event
def put(resource, payload=None, **lookup):
"""
Default function for handling PUT requests, it has decorators for
rate limiting, authentication and for raising pre-request events.
After the decorators are applied, forwards the call to :func:`put_internal`
.. versionchanged:: 0.5
Split into put() and put_internal().
"""
return put_internal(resource, payload, concurrency_check=True,
skip_validation=False, **lookup)
def put_internal(resource, payload=None, concurrency_check=False,
skip_validation=False, **lookup):
""" Intended for internal put calls, this method is not rate limited,
authentication is not checked, pre-request events are not raised, and
concurrency checking is optional. Performs a document replacement.
Updates are first validated against the resource schema. If validation
passes, the document is replaced and an OK status update is returned.
If validation fails a set of validation issues is returned.
:param resource: the name of the resource to which the document belongs.
:param payload: alternative payload. When calling put() from your own code
you can provide an alternative payload. This can be useful,
for example, when you have a callback function hooked to a
certain endpoint, and want to perform additional put()
calls from there.
Please be advised that in order to successfully use this
option, a request context must be available.
:param concurrency_check: concurrency check switch (bool)
:param skip_validation: skip payload validation before write (bool)
:param **lookup: document lookup query.
.. versionchanged:: 0.6
Allow restoring soft deleted documents via PUT
.. versionchanged:: 0.5
Back to resolving default values after validation as now the validator
can properly validate dependency even when some have default values. See
#353.
Original put() has been split into put() and put_internal().
You can now pass a pre-defined custom payload to the function.
ETAG is now stored with the document (#369).
Catching all HTTPExceptions and returning them to the caller, allowing
for eventual flask.abort() invocations in callback functions to go
through. Fixes #395.
.. versionchanged:: 0.4
Allow abort() to be invoked by callback functions.
Resolve default values before validation is performed. See #353.
Raise 'on_replace' instead of 'on_insert'. The callback function gets
the document (as opposed to a list of just 1 document) as an argument.
Support for document versioning.
Raise `on_replaced` after the document has been replaced
.. versionchanged:: 0.3
Support for media fields.
When IF_MATCH is disabled, no etag is included in the payload.
Support for new validation format introduced with Cerberus v0.5.
.. versionchanged:: 0.2
Use the new STATUS setting.
Use the new ISSUES setting.
Raise pre_<method> event.
explicitly resolve default values instead of letting them be resolved
by common.parse. This avoids a validation error when a read-only field
also has a default value.
.. versionchanged:: 0.1.1
auth.request_auth_value is now used to store the auth_field value.
Item-identifier wrapper stripped from both request and response payload.
.. versionadded:: 0.1.0
"""
resource_def = app.config['DOMAIN'][resource]
schema = resource_def['schema']
if not skip_validation:
validator = app.validator(schema, resource)
if payload is None:
payload = payload_()
original = get_document(resource, concurrency_check, **lookup)
if not original:
# not found
abort(404)
last_modified = None
etag = None
issues = {}
object_id = original[config.ID_FIELD]
response = {}
if config.BANDWIDTH_SAVER is True:
embedded_fields = []
else:
req = parse_request(resource)
embedded_fields = resolve_embedded_fields(resource, req)
try:
document = parse(payload, resource)
resolve_sub_resource_path(document, resource)
if skip_validation:
validation = True
else:
validation = validator.validate_replace(document, object_id,
original)
if validation:
# sneak in a shadow copy if it wasn't already there
late_versioning_catch(original, resource)
# update meta
last_modified = datetime.utcnow().replace(microsecond=0)
document[config.LAST_UPDATED] = last_modified
document[config.DATE_CREATED] = original[config.DATE_CREATED]
if resource_def['soft_delete'] is True:
# PUT with soft delete enabled should always set the DELETED
# field to False. We are either carrying through un-deleted
# status, or restoring a soft deleted document
document[config.DELETED] = False
# ID_FIELD not in document means it is not being automatically
# handled (it has been set to a field which exists in the
# resource schema).
if config.ID_FIELD not in document:
document[config.ID_FIELD] = object_id
resolve_user_restricted_access(document, resource)
resolve_default_values(document, resource_def['defaults'])
store_media_files(document, resource, original)
resolve_document_version(document, resource, 'PUT', original)
# notify callbacks
getattr(app, "on_replace")(resource, document, original)
getattr(app, "on_replace_%s" % resource)(document, original)
resolve_document_etag(document, resource)
# write to db
try:
app.data.replace(
resource, object_id, document, original)
except app.data.OriginalChangedError:
if concurrency_check:
abort(412, description=debug_error_message(
'Client and server etags don\'t match'
))
# update oplog if needed
oplog_push(resource, document, 'PUT')
insert_versioning_documents(resource, document)
# notify callbacks
getattr(app, "on_replaced")(resource, document, original)
getattr(app, "on_replaced_%s" % resource)(document, original)
# build the full response document
build_response_document(
document, resource, embedded_fields, document)
response = document
if config.IF_MATCH:
etag = response[config.ETAG]
else:
issues = validator.errors
except ValidationError as e:
# TODO should probably log the error and abort 400 instead (when we
# got logging)
issues['validator exception'] = str(e)
except exceptions.HTTPException as e:
raise e
except Exception as e:
# consider all other exceptions as Bad Requests
abort(400, description=debug_error_message(
'An exception occurred: %s' % e
))
if len(issues):
response[config.ISSUES] = issues
response[config.STATUS] = config.STATUS_ERR
status = config.VALIDATION_ERROR_STATUS
else:
response[config.STATUS] = config.STATUS_OK
status = 200
# limit what actually gets sent to minimize bandwidth usage
response = marshal_write_response(response, resource)
return response, last_modified, etag, status
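# Hypothetical usage sketch (not part of this module): put_internal() can be
# invoked from application code, for example from an event hook, provided a
# request context is available, as noted in the docstring above. Here
# 'contacts' and document_id are placeholders:
#
#   from eve.methods.put import put_internal
#
#   with app.test_request_context():
#       response, last_modified, etag, status = put_internal(
#           'contacts', {'name': 'John Doe'}, concurrency_check=False,
#           _id=document_id)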
|
|
#!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import sys
import tempfile
import unittest
# The following non-std imports are fetched via vpython. See the list at
# //.vpython
import mock # pylint: disable=import-error
from parameterized import parameterized # pylint: disable=import-error
import six
import test_runner
_TAST_TEST_RESULTS_JSON = {
"name": "ui.ChromeLogin",
"errors": None,
"start": "2020-01-01T15:41:30.799228462-08:00",
"end": "2020-01-01T15:41:53.318914698-08:00",
"skipReason": ""
}
class TestRunnerTest(unittest.TestCase):
def setUp(self):
self._tmp_dir = tempfile.mkdtemp()
self.mock_rdb = mock.patch.object(
test_runner.result_sink, 'TryInitClient', return_value=None)
self.mock_rdb.start()
def tearDown(self):
shutil.rmtree(self._tmp_dir, ignore_errors=True)
self.mock_rdb.stop()
def safeAssertItemsEqual(self, list1, list2):
"""A Py3 safe version of assertItemsEqual.
See https://bugs.python.org/issue17866.
"""
if six.PY3:
self.assertSetEqual(set(list1), set(list2))
else:
self.assertItemsEqual(list1, list2)
class TastTests(TestRunnerTest):
def get_common_tast_args(self, use_vm):
return [
'script_name',
'tast',
'--suite-name=chrome_all_tast_tests',
'--board=eve',
'--flash',
'--path-to-outdir=out_eve/Release',
'--logs-dir=%s' % self._tmp_dir,
'--use-vm' if use_vm else '--device=localhost:2222',
]
def get_common_tast_expectations(self, use_vm, is_lacros=False):
expectation = [
test_runner.CROS_RUN_TEST_PATH,
'--board',
'eve',
'--cache-dir',
test_runner.DEFAULT_CROS_CACHE,
'--results-dest-dir',
'%s/system_logs' % self._tmp_dir,
'--flash',
'--build-dir',
'out_eve/Release',
'--results-dir',
self._tmp_dir,
'--tast-total-shards=1',
'--tast-shard-index=0',
]
expectation.extend(['--start', '--copy-on-write']
if use_vm else ['--device', 'localhost:2222'])
for p in test_runner.SYSTEM_LOG_LOCATIONS:
expectation.extend(['--results-src', p])
if not is_lacros:
expectation += [
'--mount',
'--deploy',
'--nostrip',
]
return expectation
def test_tast_gtest_filter(self):
"""Tests running tast tests with a gtest-style filter."""
with open(os.path.join(self._tmp_dir, 'streamed_results.jsonl'), 'w') as f:
json.dump(_TAST_TEST_RESULTS_JSON, f)
args = self.get_common_tast_args(False) + [
'--attr-expr=( "group:mainline" && "dep:chrome" && !informational)',
'--gtest_filter=ui.ChromeLogin:ui.WindowControl',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen:
mock_popen.return_value.returncode = 0
test_runner.main()
# The gtest filter should cause the Tast expr to be replaced with a list
# of the tests in the filter.
expected_cmd = self.get_common_tast_expectations(False) + [
'--tast=("name:ui.ChromeLogin" || "name:ui.WindowControl")'
]
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
@parameterized.expand([
[True],
[False],
])
def test_tast_attr_expr(self, use_vm):
"""Tests running a tast tests specified by an attribute expression."""
with open(os.path.join(self._tmp_dir, 'streamed_results.jsonl'), 'w') as f:
json.dump(_TAST_TEST_RESULTS_JSON, f)
args = self.get_common_tast_args(use_vm) + [
'--attr-expr=( "group:mainline" && "dep:chrome" && !informational)',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen:
mock_popen.return_value.returncode = 0
test_runner.main()
expected_cmd = self.get_common_tast_expectations(use_vm) + [
'--tast=( "group:mainline" && "dep:chrome" && !informational)',
]
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
@parameterized.expand([
[True],
[False],
])
def test_tast_lacros(self, use_vm):
"""Tests running a tast tests for Lacros."""
with open(os.path.join(self._tmp_dir, 'streamed_results.jsonl'), 'w') as f:
json.dump(_TAST_TEST_RESULTS_JSON, f)
args = self.get_common_tast_args(use_vm) + [
'-t=lacros.Basic',
'--deploy-lacros',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen:
mock_popen.return_value.returncode = 0
test_runner.main()
expected_cmd = self.get_common_tast_expectations(
use_vm, is_lacros=True) + [
'--tast',
'lacros.Basic',
'--deploy-lacros',
'--lacros-launcher-script',
test_runner.LACROS_LAUNCHER_SCRIPT_PATH,
]
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
@parameterized.expand([
[True],
[False],
])
def test_tast_with_vars(self, use_vm):
"""Tests running a tast tests with runtime variables."""
with open(os.path.join(self._tmp_dir, 'streamed_results.jsonl'), 'w') as f:
json.dump(_TAST_TEST_RESULTS_JSON, f)
args = self.get_common_tast_args(use_vm) + [
'-t=ui.ChromeLogin',
'--tast-var=key=value',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen:
mock_popen.return_value.returncode = 0
test_runner.main()
expected_cmd = self.get_common_tast_expectations(use_vm) + [
'--tast', 'ui.ChromeLogin', '--tast-var', 'key=value'
]
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
@parameterized.expand([
[True],
[False],
])
def test_tast(self, use_vm):
"""Tests running a tast tests."""
with open(os.path.join(self._tmp_dir, 'streamed_results.jsonl'), 'w') as f:
json.dump(_TAST_TEST_RESULTS_JSON, f)
args = self.get_common_tast_args(use_vm) + [
'-t=ui.ChromeLogin',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen:
mock_popen.return_value.returncode = 0
test_runner.main()
expected_cmd = self.get_common_tast_expectations(use_vm) + [
'--tast', 'ui.ChromeLogin'
]
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
class GTestTest(TestRunnerTest):
@parameterized.expand([
[True],
[False],
])
def test_gtest(self, use_vm):
"""Tests running a gtest."""
fd_mock = mock.mock_open()
args = [
'script_name',
'gtest',
'--test-exe=out_eve/Release/base_unittests',
'--board=eve',
'--path-to-outdir=out_eve/Release',
'--use-vm' if use_vm else '--device=localhost:2222',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen,\
mock.patch.object(os, 'fdopen', fd_mock),\
mock.patch.object(os, 'remove') as mock_remove,\
mock.patch.object(tempfile, 'mkstemp',
return_value=(3, 'out_eve/Release/device_script.sh')),\
mock.patch.object(os, 'fchmod'):
mock_popen.return_value.returncode = 0
test_runner.main()
self.assertEqual(1, mock_popen.call_count)
expected_cmd = [
test_runner.CROS_RUN_TEST_PATH, '--board', 'eve', '--cache-dir',
test_runner.DEFAULT_CROS_CACHE, '--as-chronos', '--remote-cmd',
'--cwd', 'out_eve/Release', '--files',
'out_eve/Release/device_script.sh'
]
expected_cmd.extend(['--start', '--copy-on-write']
if use_vm else ['--device', 'localhost:2222'])
expected_cmd.extend(['--', './device_script.sh'])
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
fd_mock().write.assert_called_once_with(
'#!/bin/sh\nexport HOME=/usr/local/tmp\n'
'export TMPDIR=/usr/local/tmp\n'
'LD_LIBRARY_PATH=./ ./out_eve/Release/base_unittests '
'--test-launcher-shard-index=0 --test-launcher-total-shards=1\n')
mock_remove.assert_called_once_with('out_eve/Release/device_script.sh')
def test_gtest_with_vpython(self):
"""Tests building a gtest with --vpython-dir."""
args = mock.MagicMock()
args.test_exe = 'base_unittests'
args.test_launcher_summary_output = None
args.trace_dir = None
args.runtime_deps_path = None
args.path_to_outdir = self._tmp_dir
args.vpython_dir = self._tmp_dir
args.logs_dir = self._tmp_dir
# With vpython_dir initially empty, the test_runner should error out
# due to missing vpython binaries.
gtest = test_runner.GTestTest(args, None)
with self.assertRaises(test_runner.TestFormatError):
gtest.build_test_command()
# Create the two expected tools, and the test should be ready to run.
with open(os.path.join(args.vpython_dir, 'vpython'), 'w'):
pass # Just touch the file.
os.mkdir(os.path.join(args.vpython_dir, 'bin'))
with open(os.path.join(args.vpython_dir, 'bin', 'python'), 'w'):
pass
gtest = test_runner.GTestTest(args, None)
gtest.build_test_command()
class HostCmdTests(TestRunnerTest):
@parameterized.expand([
[True],
[False],
])
def test_host_cmd(self, is_lacros):
args = [
'script_name',
'host-cmd',
'--board=eve',
'--flash',
'--path-to-outdir=out/Release',
'--device=localhost:2222',
]
if is_lacros:
args += ['--deploy-lacros']
else:
args += ['--deploy-chrome']
args += [
'--',
'fake_cmd',
]
with mock.patch.object(sys, 'argv', args),\
mock.patch.object(test_runner.subprocess, 'Popen') as mock_popen:
mock_popen.return_value.returncode = 0
test_runner.main()
expected_cmd = [
test_runner.CROS_RUN_TEST_PATH,
'--board',
'eve',
'--cache-dir',
test_runner.DEFAULT_CROS_CACHE,
'--flash',
'--device',
'localhost:2222',
'--build-dir',
os.path.join(test_runner.CHROMIUM_SRC_PATH, 'out/Release'),
'--host-cmd',
]
if is_lacros:
expected_cmd += [
'--deploy-lacros',
'--lacros-launcher-script',
test_runner.LACROS_LAUNCHER_SCRIPT_PATH,
]
else:
expected_cmd += ['--mount', '--nostrip', '--deploy']
expected_cmd += [
'--',
'fake_cmd',
]
self.safeAssertItemsEqual(expected_cmd, mock_popen.call_args[0][0])
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
from django.core import mail
from django.http import HttpResponse
from django.test import Client, RequestFactory, TestCase, override_settings
from .views import get_view, post_view, trace_view
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='test_client.urls',)
class ClientTest(TestCase):
fixtures = ['testdata.json']
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {'var': '\xf2'}
response = self.client.get('/get_view/', data)
# Check some response details
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], '\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
"POST some data to a view"
post_data = {
'value': 37
}
response = self.client.post('/post_view/', post_data)
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.assertContains(response, 'Data received')
def test_trace(self):
"""TRACE a view"""
response = self.client.trace('/trace_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['method'], 'TRACE')
self.assertEqual(response.templates[0].name, 'TRACE Template')
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/header_view/")
self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_response_attached_request(self):
"""
Check that the returned response has a ``request`` attribute with the
originating environ dict and a ``wsgi_request`` with the originating
``WSGIRequest`` instance.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, 'request'))
self.assertTrue(hasattr(response, 'wsgi_request'))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_response_resolver_match(self):
"""
The response contains a ResolverMatch instance.
"""
response = self.client.get('/header_view/')
self.assertTrue(hasattr(response, 'resolver_match'))
def test_response_resolver_match_redirect_follow(self):
"""
The response ResolverMatch instance contains the correct
information when following redirects.
"""
response = self.client.get('/redirect_view/', follow=True)
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_response_resolver_match_regular_view(self):
"""
The response ResolverMatch instance contains the correct
information when accessing a regular view.
"""
response = self.client.get('/get_view/')
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?><library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>"""
response = self.client.post("/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
"GET a URL through http"
response = self.client.get('/secure_view/', secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '80')
def test_secure(self):
"GET a URL through https"
response = self.client.get('/secure_view/', secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '443')
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get('/redirect_view/')
# Check that the response was a 302 (redirect) and that
# assertRedirect() understands to put an implicit http://testserver/ in
# front of non-absolute URLs.
self.assertRedirects(response, '/get_view/')
host = 'django.testserver'
client_providing_host = Client(HTTP_HOST=host)
response = client_providing_host.get('/redirect_view/')
# Check that the response was a 302 (redirect) with absolute URI
self.assertRedirects(response, '/get_view/', host=host)
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
# Check if parameters are intact
self.assertRedirects(response, 'http://testserver/get_view/?var=value')
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get('/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect)
self.assertRedirects(response, 'http://testserver/get_view/', status_code=301)
client_providing_host = Client(HTTP_HOST='django.testserver')
response = client_providing_host.get('/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect) with absolute URI
self.assertRedirects(response, 'http://django.testserver/get_view/', status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get('/temporary_redirect_view/')
# Check that the response was a 302 (non-permanent redirect)
self.assertRedirects(response, 'http://testserver/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get('/double_redirect_view/')
# Check that the response was a 302, and that
# the attempt to get the redirection location returned 301 when retrieved
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get('/double_redirect_view/', follow=True)
self.assertRedirects(response, 'http://testserver/get_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 2)
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
response = self.client.get('/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
response = self.client.get('/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {
'text': 'Hello World',
'multi': ('b', 'c', 'e')
}
response = self.client.get('/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
# Check that the multi-value data has been rolled out ok
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/unknown_view/')
# Check that the response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get('/unknown_view/;some-parameter')
# Check that the path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, 'http://testserver/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"Request a page that is protected with @login, but use an inactive login"
login = self.client.login(username='inactive', password='password')
self.assertFalse(login)
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/login_protected_view/')
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"Request a page that is protected with @permission_required but raises an exception"
# Get the page without logging in. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get('/django_project_redirect/')
self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
self.client.post('/session_view/')
# Check that the session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
def test_view_with_exception(self):
"Request a page that is known to throw an error"
self.assertRaises(KeyError, self.client.get, "/broken_view/")
# Try the same assertion, a different way
try:
self.client.get('/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
"Test that mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
def test_mass_mail_sending(self):
"Test that mass mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
@override_settings(
MIDDLEWARE_CLASSES=('django.middleware.csrf.CsrfViewMiddleware',),
ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(TestCase):
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/post_view/', {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(TestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertEqual(hasattr(self.client, "i_am_customized"), True)
_generic_view = lambda request: HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(TestCase):
"""Tests for the request factory."""
# A mapping between names of HTTP/1.1 methods and their test views.
http_methods_and_views = (
('get', get_view),
('post', post_view),
('put', _generic_view),
('patch', _generic_view),
('delete', _generic_view),
('head', _generic_view),
('options', _generic_view),
('trace', trace_view),
)
def setUp(self):
self.request_factory = RequestFactory()
def test_request_factory(self):
"""The request factory implements all the HTTP/1.1 methods."""
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method('/somewhere/')
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
"""
The request factory returns a templated response for a GET request.
"""
request = self.request_factory.get('/somewhere/')
response = get_view(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is a test')
def test_trace_request_from_factory(self):
"""The request factory returns an echo response for a TRACE request."""
url_path = '/somewhere/'
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertEqual(response.status_code, 200)
self.assertContains(response, echoed_request_line)
|
|
# Copyright (c) 2007-8, Playful Invention Company.
# Copyright (c) 2008-11, Walter Bender
# Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import cairo
import os
from math import pi
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import PangoCairo
from gi.repository import GObject
from tautils import get_path
from taconstants import (Color, TMP_SVG_PATH, DEFAULT_PEN_COLOR,
DEFAULT_BACKGROUND_COLOR, DEFAULT_FONT)
def wrap100(n):
''' A variant on mod... 101 -> 99; 199 -> 1 '''
n = int(n)
n %= 200
if n > 99:
n = 199 - n
return n
def calc_shade(c, s, invert=False):
''' Convert a color to the current shade (lightness/darkness). '''
# Assumes 16 bit input values
if invert:
if s == -1:
return int(c)
elif s < 0:
return int(c / (1 + s))
return int((c - 65536 * s) / (1 - s))
else:
if s < 0:
return int(c * (1 + s))
return int(c + (65536 - c) * s)
def calc_gray(c, g, invert=False):
''' Gray is a pseudo saturation calculation. '''
# Assumes 16 bit input values
if g == 100:
return int(c)
if invert:
if g == 0:
return int(c)
else:
return int(((c * 100) - (32768 * (100 - g))) / g)
else:
return int(((c * g) + (32768 * (100 - g))) / 100)
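# A minimal worked example of the two helpers above (channels are 16-bit, as
# the docstrings note; the values are chosen for illustration only):
#   calc_shade(0xFF00, 0)  -> 0xFF00  (shade 50 maps to s == 0: unchanged)
#   calc_shade(0xFF00, -1) -> 0       (shade 0 drives every channel to black)
#   calc_gray(0xFF00, 100) -> 0xFF00  (gray 100 leaves saturation untouched)
#   calc_gray(0xFF00, 0)   -> 32768   (gray 0 collapses every channel to mid-gray)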
colors = {}
DEGTOR = pi / 180.
RTODEG = 180. / pi
COLOR_TABLE = (
0xFF0000, 0xFF0D00, 0xFF1A00, 0xFF2600, 0xFF3300,
0xFF4000, 0xFF4D00, 0xFF5900, 0xFF6600, 0xFF7300,
0xFF8000, 0xFF8C00, 0xFF9900, 0xFFA600, 0xFFB300,
0xFFBF00, 0xFFCC00, 0xFFD900, 0xFFE600, 0xFFF200,
0xFFFF00, 0xE6FF00, 0xCCFF00, 0xB3FF00, 0x99FF00,
0x80FF00, 0x66FF00, 0x4DFF00, 0x33FF00, 0x1AFF00,
0x00FF00, 0x00FF0D, 0x00FF1A, 0x00FF26, 0x00FF33,
0x00FF40, 0x00FF4D, 0x00FF59, 0x00FF66, 0x00FF73,
0x00FF80, 0x00FF8C, 0x00FF99, 0x00FFA6, 0x00FFB3,
0x00FFBF, 0x00FFCC, 0x00FFD9, 0x00FFE6, 0x00FFF2,
0x00FFFF, 0x00F2FF, 0x00E6FF, 0x00D9FF, 0x00CCFF,
0x00BFFF, 0x00B3FF, 0x00A6FF, 0x0099FF, 0x008CFF,
0x0080FF, 0x0073FF, 0x0066FF, 0x0059FF, 0x004DFF,
0x0040FF, 0x0033FF, 0x0026FF, 0x001AFF, 0x000DFF,
0x0000FF, 0x0D00FF, 0x1A00FF, 0x2600FF, 0x3300FF,
0x4000FF, 0x4D00FF, 0x5900FF, 0x6600FF, 0x7300FF,
0x8000FF, 0x8C00FF, 0x9900FF, 0xA600FF, 0xB300FF,
0xBF00FF, 0xCC00FF, 0xD900FF, 0xE600FF, 0xF200FF,
0xFF00FF, 0xFF00E6, 0xFF00CC, 0xFF00B3, 0xFF0099,
0xFF0080, 0xFF0066, 0xFF004D, 0xFF0033, 0xFF001A)
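# COLOR_TABLE is a 100-entry hue wheel (red -> yellow -> green -> cyan -> blue
# -> magenta -> back toward red); wrap100() above folds any color value into
# the 0-99 index range used by set_fgcolor() and get_color_index() below.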
class TurtleGraphics:
''' A class for the Turtle graphics canvas '''
def __init__(self, turtle_window, width, height):
''' Create a sprite to hold the canvas. '''
self.turtle_window = turtle_window
self.width = width
self.height = height
self.textsize = 48
self._fgrgb = DEFAULT_PEN_COLOR
self._bgrgb = DEFAULT_BACKGROUND_COLOR
self._font = DEFAULT_FONT
self._shade = 0
self._color = 0
self._gray = 100
self.cr_svg = None # Surface used for saving to SVG
# Build a cairo.Context from a cairo.XlibSurface
self.canvas = cairo.Context(self.turtle_window.turtle_canvas)
self.set_pen_size(5)
def setup_svg_surface(self):
''' Set up a surface for saving to SVG '''
svg_surface = cairo.SVGSurface(self.get_svg_path(),
self.width, self.height)
self.svg_surface = svg_surface
self.cr_svg = cairo.Context(svg_surface)
self.cr_svg.set_line_cap(1) # Set the line cap to be round
def get_svg_path(self):
'''We use a separate file for the svg used for generating Sugar icons
'''
if self.turtle_window.running_sugar:
return os.path.join(get_path(self.turtle_window.activity,
'instance'), 'output.svg')
else:
return TMP_SVG_PATH
def fill_polygon(self, poly_points):
''' Draw the polygon... '''
def _fill_polygon(cr, poly_points):
cr.new_path()
for i, p in enumerate(poly_points):
if p[0] == 'move':
if i == len(poly_points) - 1 or \
poly_points[i + 1][0] not in ['rarc', 'larc']:
cr.move_to(p[1], p[2])
elif p[0] == 'rarc':
cr.arc(p[1], p[2], p[3], p[4], p[5])
elif p[0] == 'larc':
cr.arc_negative(p[1], p[2], p[3], p[4], p[5])
else: # line
cr.line_to(p[1], p[2])
cr.close_path()
cr.fill()
_fill_polygon(self.canvas, poly_points)
self.inval()
if self.cr_svg is not None:
_fill_polygon(self.cr_svg, poly_points)
def clearscreen(self):
'''Clear the canvas and reset most graphics attributes to defaults.'''
def _clearscreen(cr):
cr.move_to(0, 0)
self._bgrgb = DEFAULT_BACKGROUND_COLOR
cr.set_source_rgb(self._bgrgb[0] / 255.,
self._bgrgb[1] / 255.,
self._bgrgb[2] / 255.)
cr.rectangle(0, 0, self.width * 2, self.height * 2)
cr.fill()
_clearscreen(self.canvas)
self.inval()
if self.cr_svg is not None:
_clearscreen(self.cr_svg)
def rarc(self, x, y, r, a, heading):
''' draw a clockwise arc '''
def _rarc(cr, x, y, r, a, h):
cr.arc(x, y, r, (h - 180) * DEGTOR, (h - 180 + a) * DEGTOR)
cr.stroke()
_rarc(self.canvas, x, y, r, a, heading)
self.inval()
if self.cr_svg is not None:
_rarc(self.cr_svg, x, y, r, a, heading)
def larc(self, x, y, r, a, heading):
''' draw a counter-clockwise arc '''
def _larc(cr, x, y, r, a, h):
cr.arc_negative(x, y, r, h * DEGTOR, (h - a) * DEGTOR)
cr.stroke()
_larc(self.canvas, x, y, r, a, heading)
self.inval()
if self.cr_svg is not None:
_larc(self.cr_svg, x, y, r, a, heading)
def set_pen_size(self, pen_size):
''' Set the pen size '''
self.canvas.set_line_width(pen_size)
if self.cr_svg is not None:
self.cr_svg.set_line_width(pen_size)
def fillscreen(self, c, s):
''' Deprecated method: Fill screen with color/shade '''
self.fillscreen_with_gray(c, s, self._gray)
def fillscreen_with_gray(self, color, shade, gray):
''' Fill screen with color/shade/gray and reset to defaults '''
save_rgb = self._fgrgb[:]
# Special case for color blocks
if isinstance(color, Color):
if color.color is None:
self._shade = color.shade
else:
self._color = color.color
else:
self._color = color
if isinstance(shade, Color):
self._shade = shade.shade
else:
self._shade = shade
if isinstance(gray, Color):
self._gray = gray.gray
else:
self._gray = gray
if self._gray < 0:
self._gray = 0
if self._gray > 100:
self._gray = 100
self.set_fgcolor(shade=self._shade, gray=self._gray, color=self._color)
self._bgrgb = self._fgrgb[:]
def _fillscreen(cr, rgb, w, h):
cr.set_source_rgb(rgb[0] / 255., rgb[1] / 255., rgb[2] / 255.)
cr.rectangle(0, 0, w * 2, h * 2)
cr.fill()
_fillscreen(self.canvas, self._fgrgb, self.width, self.height)
self.inval()
if self.cr_svg is not None:
_fillscreen(self.cr_svg, self._fgrgb, self.width, self.height)
self._fgrgb = save_rgb[:]
def set_fgcolor(self, shade=None, gray=None, color=None):
''' Set the foreground color '''
if shade is not None:
self._shade = shade
if gray is not None:
self._gray = gray
if color is not None:
self._color = color
sh = (wrap100(self._shade) - 50) / 50.0
rgb = COLOR_TABLE[wrap100(self._color)]
r = (rgb >> 8) & 0xff00
r = calc_gray(r, self._gray)
r = calc_shade(r, sh)
g = rgb & 0xff00
g = calc_gray(g, self._gray)
g = calc_shade(g, sh)
b = (rgb << 8) & 0xff00
b = calc_gray(b, self._gray)
b = calc_shade(b, sh)
self._fgrgb = [r >> 8, g >> 8, b >> 8]
def draw_surface(self, surface, x, y, w, h):
''' Draw a surface '''
def _draw_surface(cc, surface, x, y, w, h):
cc.set_source_surface(surface, x, y)
cc.rectangle(x, y, w, h)
cc.fill()
_draw_surface(self.canvas, surface, x, y, w, h)
self.inval()
if self.cr_svg is not None:
_draw_surface(self.cr_svg, surface, x, y, w, h)
def draw_pixbuf(self, pixbuf, a, b, x, y, w, h, heading):
''' Draw a pixbuf '''
def _draw_pixbuf(cc, pixbuf, a, b, x, y, w, h, heading):
# Use Gdk.cairo_set_source_pixbuf() to paint the pixbuf into the
# cairo.Context, rotated about the center of the image.
cc.save()
# center the rotation on the center of the image
cc.translate(x + w / 2., y + h / 2.)
cc.rotate(heading * DEGTOR)
cc.translate(-x - w / 2., -y - h / 2.)
Gdk.cairo_set_source_pixbuf(cc, pixbuf, x, y)
cc.rectangle(x, y, w, h)
cc.fill()
cc.restore()
_draw_pixbuf(self.canvas, pixbuf, a, b, x, y, w, h, heading)
self.inval()
if self.cr_svg is not None:
_draw_pixbuf(self.cr_svg, pixbuf, a, b, x, y, w, h, heading)
def set_font(self, font_name):
''' Set font used by draw_text '''
self._font = str(font_name)
def draw_text(self, label, x, y, size, width, heading, scale):
''' Draw text '''
def _draw_text(cc, label, x, y, size, width, scale, heading, rgb,
wrap=False):
import textwrap
final_scale = int(size * scale) * Pango.SCALE
label = str(label)
if wrap:
label = '\n'.join(textwrap.wrap(label, int(width / scale)))
pl = PangoCairo.create_layout(cc)
fd = Pango.FontDescription(self._font)
fd.set_size(final_scale)
pl.set_font_description(fd)
if isinstance(label, (str, unicode)):
text = label.replace('\0', ' ')
elif isinstance(label, (float, int)):
text = str(label)
else:
text = label
pl.set_text(text, len(text))  # use the sanitized text computed above
pl.set_width(int(width) * Pango.SCALE)
cc.save()
cc.translate(x, y)
cc.rotate(heading * DEGTOR)
cc.set_source_rgb(rgb[0] / 255., rgb[1] / 255., rgb[2] / 255.)
PangoCairo.update_layout(cc, pl)
PangoCairo.show_layout(cc, pl)
cc.restore()
width *= scale
_draw_text(self.canvas, label, x, y, size, width, scale, heading,
self._fgrgb)
self.inval()
if self.cr_svg is not None: # and self.pendown:
_draw_text(self.cr_svg, label, x, y, size, width, scale, heading,
self._fgrgb, wrap=True)
def set_source_rgb(self):
r = self._fgrgb[0] / 255.
g = self._fgrgb[1] / 255.
b = self._fgrgb[2] / 255.
self.canvas.set_source_rgb(r, g, b)
if self.cr_svg is not None:
self.cr_svg.set_source_rgb(r, g, b)
def draw_line(self, x1, y1, x2, y2):
''' Draw a line '''
def _draw_line(cr, x1, y1, x2, y2):
cr.set_line_cap(1) # Set the line cap to be round
cr.move_to(x1, y1)
cr.line_to(x2, y2)
cr.stroke()
_draw_line(self.canvas, x1, y1, x2, y2)
if self.cr_svg is not None:
_draw_line(self.cr_svg, x1, y1, x2, y2)
self.inval()
def get_color_index(self, r, g, b, a=0):
''' Find the closest palette entry to the rgb triplet '''
if self._shade != 50 or self._gray != 100:
r <<= 8
g <<= 8
b <<= 8
if self._shade != 50:
sh = (wrap100(self._shade) - 50) / 50.
r = calc_shade(r, sh, True)
g = calc_shade(g, sh, True)
b = calc_shade(b, sh, True)
if self._gray != 100:
r = calc_gray(r, self._gray, True)
g = calc_gray(g, self._gray, True)
b = calc_gray(b, self._gray, True)
r >>= 8
g >>= 8
b >>= 8
min_distance = 1000000
closest_color = -1
for i, c in enumerate(COLOR_TABLE):
cr = int((c & 0xff0000) >> 16)
cg = int((c & 0x00ff00) >> 8)
cb = int((c & 0x0000ff))
distance_squared = \
((cr - r) ** 2) + ((cg - g) ** 2) + ((cb - b) ** 2)
if distance_squared == 0:
return i
if distance_squared < min_distance:
min_distance = distance_squared
closest_color = i
return closest_color
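# Minimal example (hypothetical state, for illustration only): with
# self._shade == 50 and self._gray == 100 the rgb triplet is compared as-is,
# so get_color_index(255, 0, 0) matches COLOR_TABLE[0] == 0xFF0000 exactly and
# returns 0.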
def get_pixel(self, x, y):
''' Read the pixel at x, y '''
if self.turtle_window.interactive_mode:
x = int(x)
y = int(y)
w = self.turtle_window.turtle_canvas.get_width()
h = self.turtle_window.turtle_canvas.get_height()
if x < 0 or x > (w - 1) or y < 0 or y > (h - 1):
return(-1, -1, -1, -1)
# create a new 1x1 cairo surface
cs = cairo.ImageSurface(cairo.FORMAT_RGB24, 1, 1)
cr = cairo.Context(cs)
cr.set_source_surface(self.turtle_window.turtle_canvas, -x, -y)
cr.rectangle(0, 0, 1, 1)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.fill()
cs.flush() # ensure all writing is done
pixels = cs.get_data() # Read the pixel
return (ord(pixels[2]), ord(pixels[1]), ord(pixels[0]), 0)
else:
return(-1, -1, -1, -1)
def svg_close(self):
''' Close current SVG graphic '''
self.cr_svg.show_page()
self.svg_surface.flush()
self.svg_surface.finish()
def svg_reset(self):
''' Reset svg flags '''
self.cr_svg = None
def inval(self):
''' Invalidate a region for gtk '''
self.turtle_window.inval_all()
|
|
from django.db import transaction
from rest_framework import serializers
from rest_auth.serializers import LoginSerializer, PasswordResetSerializer
from rest_framework.validators import UniqueValidator
from account.declaration import PartnerDeclarationPDFCreator
from account.forms import CustomPasswordResetForm
from common.consts import (
COLLABORATION_EVIDENCE_MODES,
)
from common.serializers import CommonFileBase64UploadSerializer
from partner.models import (
Partner,
PartnerProfile,
PartnerHeadOrganization,
PartnerMember,
PartnerRegistrationDocument,
PartnerGoverningDocument,
PartnerCollaborationEvidence,
)
from partner.roles import PartnerRole
from partner.serializers import (
PartnerSerializer,
PartnerProfileSerializer,
PartnerHeadOrganizationRegisterSerializer,
PartnerGoverningDocumentSerializer,
PartnerRegistrationDocumentSerializer,
PartnerCollaborationEvidenceSerializer,
)
from account.models import User, UserProfile
from sanctionslist.scans import sanctions_scan_partner
class SimpleAccountSerializer(serializers.ModelSerializer):
date_joined = serializers.DateTimeField(required=False, read_only=True)
fullname = serializers.CharField(required=False)
email = serializers.EmailField(validators=[
UniqueValidator(queryset=User.objects.all(), lookup='iexact')
])
class Meta:
model = User
fields = (
'id',
'fullname',
'email',
'password',
'date_joined',
)
extra_kwargs = {'password': {'write_only': True}}
def save(self):
user = super(SimpleAccountSerializer, self).save()
if 'password' in self.validated_data:
user.set_password(self.validated_data['password'])
user.save()
return user
class PartnerRecommendationDocumentSerializer(PartnerCollaborationEvidenceSerializer):
evidence_file = CommonFileBase64UploadSerializer()
class Meta(PartnerCollaborationEvidenceSerializer.Meta):
extra_kwargs = {
'organization_name': {
'required': True
},
'date_received': {
'required': True
},
'partner': {
'required': False
},
}
class PartnerDeclarationSerializer(serializers.Serializer):
question = serializers.CharField()
answer = serializers.CharField()
class PartnerRegistrationSerializer(serializers.Serializer):
user = serializers.HiddenField(default=serializers.CreateOnlyDefault(serializers.CurrentUserDefault()))
partner = PartnerSerializer()
partner_profile = PartnerProfileSerializer()
partner_head_organization = PartnerHeadOrganizationRegisterSerializer()
governing_document = PartnerGoverningDocumentSerializer(required=False)
registration_document = PartnerRegistrationDocumentSerializer(required=False)
recommendation_document = PartnerRecommendationDocumentSerializer(required=False)
declaration = PartnerDeclarationSerializer(many=True, write_only=True)
def validate(self, attrs):
validated_data = super(PartnerRegistrationSerializer, self).validate(attrs)
governing_document = validated_data.get('governing_document')
registration_document = validated_data.get('registration_document')
recommendation_document = validated_data.get('recommendation_document')
if not any([governing_document, recommendation_document, registration_document]):
raise serializers.ValidationError('At least one document needs to be provided.')
profile = validated_data.get('partner_profile', {})
if profile.get('have_governing_document'):
if not governing_document:
raise serializers.ValidationError({
'governing_document': 'This field is required'
})
elif not profile.get('missing_governing_document_comment'):
raise serializers.ValidationError({
'missing_governing_document_comment': 'This field is required'
})
if profile.get('registered_to_operate_in_country'):
if not registration_document:
raise serializers.ValidationError({
'registration_document': 'This field is required'
})
elif not profile.get('missing_registration_document_comment'):
raise serializers.ValidationError({
'missing_registration_document_comment': 'This field is required'
})
return validated_data
def save_documents(self, validated_data, user):
governing_document = validated_data.get('governing_document')
registration_document = validated_data.get('registration_document')
recommendation_document = validated_data.get('recommendation_document')
if governing_document:
governing_document['created_by'] = user
governing_document['profile'] = self.partner.profile
PartnerGoverningDocument.objects.create(editable=False, **governing_document)
if registration_document:
registration_document['created_by'] = user
registration_document['profile'] = self.partner.profile
PartnerRegistrationDocument.objects.create(editable=False, **registration_document)
if recommendation_document:
recommendation_document['created_by'] = user
recommendation_document['partner'] = self.partner
recommendation_document['mode'] = COLLABORATION_EVIDENCE_MODES.reference
PartnerCollaborationEvidence.objects.create(editable=False, **recommendation_document)
self.partner.profile.any_reference = True
self.partner.profile.save()
@transaction.atomic
def create(self, validated_data):
user = validated_data.pop('user')
self.partner = Partner.objects.create(**validated_data['partner'])
self.save_documents(validated_data, user)
PartnerProfile.objects.filter(partner=self.partner).update(**validated_data['partner_profile'])
partner_head_org = validated_data['partner_head_organization']
partner_head_org['partner_id'] = self.partner.pk
PartnerHeadOrganization.objects.create(**partner_head_org)
partner_member, _ = PartnerMember.objects.get_or_create(
partner=self.partner,
user=user,
defaults={
'role': PartnerRole.ADMIN.name,
'title': 'Administrator'
}
)
self.partner.save()
self.partner = Partner.objects.get(pk=self.partner.pk)
Partner.objects.filter(pk=self.partner.pk).update(
declaration=PartnerDeclarationPDFCreator(
validated_data['declaration'], self.partner, user
).get_as_common_file()
)
sanctions_scan_partner(self.partner)
from partner.serializers import PartnerMemberSerializer
return {
"partner": PartnerSerializer(instance=self.partner).data,
"user": SimpleAccountSerializer(instance=user).data,
"partner_profile": PartnerProfileSerializer(instance=self.partner.profile).data,
"partner_head_organization": PartnerHeadOrganizationRegisterSerializer(instance=self.partner.org_head).data,
"partner_member": PartnerMemberSerializer(instance=partner_member).data,
}
class UserProfileSerializer(serializers.ModelSerializer):
notification_frequency_display = serializers.CharField(source='get_notification_frequency_display')
class Meta:
model = UserProfile
fields = (
'id',
'notification_frequency',
'notification_frequency_display',
'accepted_tos',
)
class UserSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='fullname', read_only=True)
profile = UserProfileSerializer(read_only=True)
class Meta:
model = User
fields = (
'id',
'is_active',
'name',
'email',
'status',
'profile',
)
class BasicUserSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='fullname', read_only=True)
class Meta:
model = User
fields = (
'id',
'name',
'email',
)
class IDUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', )
class PartnerUserSerializer(UserSerializer):
partners = serializers.SerializerMethodField()
role = serializers.SerializerMethodField()
permissions = serializers.SerializerMethodField()
def _partner_member(self, user):
request = self.context.get('request')
if request and request.partner_member:
return request.partner_member
return user.partner_members.first()
def get_role(self, user):
return self._partner_member(user).get_role_display()
class Meta:
model = User
fields = UserSerializer.Meta.fields + (
'partners',
'role',
'permissions',
)
def get_partners(self, obj):
partner_ids = obj.get_partner_ids_i_can_access()
return PartnerSerializer(Partner.objects.filter(id__in=partner_ids), many=True).data
def get_permissions(self, user):
return [
p.name for p in self._partner_member(user).user_permissions
]
class UserFullnameSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'fullname', 'email', )
class PartnerMemberSerializer(serializers.ModelSerializer):
user = UserFullnameSerializer()
class Meta:
model = PartnerMember
fields = "__all__"
class CustomLoginSerializer(LoginSerializer):
def validate(self, attrs):
attrs = super(CustomLoginSerializer, self).validate(attrs)
user = attrs['user']
if user.is_partner_user:
if not user.partner_members.filter(partner__is_locked=False).exists():
raise serializers.ValidationError('Account is Locked')
return attrs
class CustomPasswordResetSerializer(PasswordResetSerializer):
password_reset_form_class = CustomPasswordResetForm
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HMoveOneInputRepeatedIndirectMatchDiffRulesLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMoveOneInputRepeatedIndirectMatchDiffRulesLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMoveOneInputRepeatedIndirectMatchDiffRulesLHS, self).__init__(name='HMoveOneInputRepeatedIndirectMatchDiffRulesLHS', num_nodes=5, edges=[])
# Add the edges
self.add_edges([(4, 0), (0, 3), (4, 1), (1, 2)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = pickle.loads("""Vif PreNode('9')['associationType'] == PreNode('10')['associationType']:\u000a return True\u000a\u000areturn False\u000a
p1
.""")
self["name"] = """"""
self["GUID__"] = UUID('86719168-f64c-454a-870d-ac7a081e98f5')
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_label__"] = """9"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[0]["mm__"] = """MT_pre__indirectLink_S"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["GUID__"] = UUID('62b27716-d133-4135-8c85-02f502047f4f')
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_label__"] = """10"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["mm__"] = """MT_pre__indirectLink_S"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = UUID('2bac1f61-03cd-49cd-b8b8-e9ab86aaf2a2')
self.vs[2]["MT_pivotOut__"] = """element1"""
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_subtypeMatching__"] = True
self.vs[2]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_pivotIn__"] = """element1"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__VirtualDevice'
p2
aS'MT_pre__Distributable'
p3
aS'MT_pre__Signal'
p4
aS'MT_pre__ExecFrame'
p5
aS'MT_pre__ECU'
p6
a.""")
self.vs[2]["mm__"] = """MT_pre__MetaModelElement_S"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["GUID__"] = UUID('b3738c6c-94c0-495e-89b4-cf6e6729abe9')
self.vs[3]["MT_pivotOut__"] = """element2"""
self.vs[3]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_subtypeMatching__"] = True
self.vs[3]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_pivotIn__"] = """element2"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__VirtualDevice'
p2
aS'MT_pre__Distributable'
p3
aS'MT_pre__Signal'
p4
aS'MT_pre__ExecFrame'
p5
aS'MT_pre__ECU'
p6
a.""")
self.vs[3]["mm__"] = """MT_pre__MetaModelElement_S"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["GUID__"] = UUID('b7780f52-7449-45f2-b308-b6332cdf60b1')
self.vs[4]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_subtypeMatching__"] = True
self.vs[4]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__VirtualDevice'
p2
aS'MT_pre__Distributable'
p3
aS'MT_pre__Signal'
p4
aS'MT_pre__ExecFrame'
p5
aS'MT_pre__ECU'
p6
a.""")
self.vs[4]["mm__"] = """MT_pre__MetaModelElement_S"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["GUID__"] = UUID('2a0aa472-1e42-4b26-aa21-a7c67abae717')
def eval_name3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
if PreNode('9')['associationType'] == PreNode('10')['associationType']:
return True
return False
|
|
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
import os
from astropy.tests.helper import pytest
from ..io import read, write, split_numbers
from ..io import ref_mjd
from ..io import high_precision_keyword_read
from ..io import load_events_and_gtis
from ..io import read_header_key
import warnings
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, 'data')
_H5PY_INSTALLED = True
try:
import h5py
except ImportError:
_H5PY_INSTALLED = False
skip_condition = pytest.mark.skipif(not _H5PY_INSTALLED,
reason = "H5PY not installed.")
class TestIO(object):
def test_common_name(self):
"""Test the common_name function."""
from ..io import common_name
a = 'A_3-50_A'
b = 'B_3-50_B'
assert common_name(a, b) == '3-50'
def test_high_precision_keyword(self):
"""Test high precision FITS keyword read."""
hdr = {"MJDTESTI": 100, "MJDTESTF": np.longdouble(0.5),
"CIAO": np.longdouble(0.)}
assert \
high_precision_keyword_read(hdr,
"MJDTEST") == np.longdouble(100.5), \
"Keyword MJDTEST read incorrectly"
assert \
high_precision_keyword_read(hdr,
"MJDTESTA") == np.longdouble(100.5), \
"Keyword MJDTESTA read incorrectly"
assert \
high_precision_keyword_read(hdr, "CIAO") == np.longdouble(0.), \
"Keyword CIAO read incorrectly"
assert high_precision_keyword_read(hdr, "BU") is None, \
"Inexistent key read incorrectly"
def test_event_file_read(self):
"""Test event file reading."""
fname = os.path.join(datadir, 'monol_testA.evt')
load_events_and_gtis(fname, additional_columns=["PI"])
def test_read_header_key(self):
"""Test event file reading."""
fname = os.path.join(datadir, 'monol_testA.evt')
assert read_header_key(fname, "INSTRUME") == 'FPMA'
assert read_header_key(fname, "BU") == ""
def test_read_mjdref(self):
"""Test event file reading."""
fname = os.path.join(datadir, 'monol_testA.evt')
assert ref_mjd(fname) is not None
def test_split_number(self):
"""Test split with high precision numbers."""
numbers = np.array([57401.0000003423423400453453,
0.00000574010000003426646], dtype = np.longdouble)
number_I, number_F = split_numbers(numbers)
r_numbers = np.longdouble(number_I) + np.longdouble(number_F)
assert (numbers == r_numbers).all()
class TestIOReadWrite(object):
"""A class to test all the read and write functions."""
def __init__(self):
self.number = 10
self.str = 'Test'
self.list = [1,2,3]
self.array = np.array([1,2,3])
self.long_number = np.longdouble(1.25)
self.long_array = np.longdouble([1,2,3])
def test_operation(self):
return self.number * 10
class TestFileFormats(object):
def test_pickle_read_write(self):
test_object = TestIOReadWrite()
write(test_object, filename='test.pickle', format_='pickle')
assert read('test.pickle', 'pickle') is not None
os.remove('test.pickle')
def test_pickle_attributes(self):
"""Test if pickle maintains class object attributes."""
test_object = TestIOReadWrite()
write(test_object, filename='test.pickle', format_='pickle')
rec_object = read('test.pickle', 'pickle')
assert rec_object.number == test_object.number
assert rec_object.str == test_object.str
assert rec_object.list == test_object.list
assert (rec_object.array == test_object.array).all()
assert rec_object.long_number == test_object.long_number
assert (rec_object.long_array == test_object.long_array).all()
os.remove('test.pickle')
def test_pickle_functions(self):
"""Test if pickle maintains class methods."""
test_object = TestIOReadWrite()
write(test_object,'test.pickle', 'pickle')
assert read('test.pickle', 'pickle').test_operation() == test_object.number * 10
os.remove('test.pickle')
@skip_condition
def test_hdf5_write(self):
test_object = TestIOReadWrite()
write(test_object, 'test.hdf5', 'hdf5')
os.remove('test.hdf5')
@skip_condition
def test_hdf5_read(self):
test_object = TestIOReadWrite()
write(test_object, 'test.hdf5', 'hdf5')
read('test.hdf5','hdf5')
os.remove('test.hdf5')
@skip_condition
def test_hdf5_data_recovery(self):
test_object = TestIOReadWrite()
write(test_object, 'test.hdf5', 'hdf5')
rec_object = read('test.hdf5','hdf5')
assert rec_object['number'] == test_object.number
assert rec_object['str'] == test_object.str
assert (rec_object['list'] == test_object.list).all()
assert (rec_object['array'] == np.array(test_object.array)).all()
assert rec_object['long_number'] == np.double(test_object.long_number)
assert (rec_object['long_array'] == np.double(np.array(test_object.long_array))).all()
os.remove('test.hdf5')
def test_save_ascii(self):
time = [1, 2, 3, 4]
counts = [2, 3, 41, 4]
write(np.array([time, counts]).T, "ascii_test.txt",
"ascii")
os.remove("ascii_test.txt")
def test_save_ascii_with_mixed_types(self):
time = ["bla", 1, 2, 3]
counts = [2,3,41,4]
with pytest.raises(Exception):
write(np.array([time, counts]).T,
"ascii_test.txt", "ascii")
def test_save_ascii_with_format(self):
time = ["bla", 1, 2, 3]
counts = [2,3,41,4]
write(np.array([time, counts]).T,
filename="ascii_test.txt", format_="ascii",
fmt=["%s", "%s"])
def test_read_ascii(self):
time = [1,2,3,4,5]
counts = [5,7,8,2,3]
np.savetxt("ascii_test.txt", np.array([time, counts]).T)
read("ascii_test.txt", "ascii")
os.remove("ascii_test.txt")
def test_fits_write(self):
test_object = TestIOReadWrite()
write(test_object, 'test.fits', 'fits')
os.remove('test.fits')
def test_fits_read(self):
test_object = TestIOReadWrite()
write(test_object, 'test.fits', 'fits')
read('test.fits','fits',cols=['array','number','long_number'])
os.remove('test.fits')
def test_fits_with_multiple_tables(self):
test_object = TestIOReadWrite()
write(test_object, 'test.fits', 'fits', tnames=['EVENTS', 'GTI'],
colsassign={'number':'GTI', 'array':'GTI'})
os.remove('test.fits')
def test_fits_data_recovery(self):
test_object = TestIOReadWrite()
write(test_object, 'test.fits', 'fits')
rec_object = read('test.fits', 'fits', cols = ['number', 'str', 'list',
'array','long_array','long_number'])
assert rec_object['NUMBER'] == test_object.number
assert rec_object['STR'] == test_object.str
assert (rec_object['LIST'] == test_object.list).all()
assert (rec_object['ARRAY'] == np.array(test_object.array)).all()
assert rec_object['LONG_NUMBER'] == np.double(test_object.long_number)
assert (rec_object['LONG_ARRAY'] == np.double(np.array(test_object.long_array))).all()
del rec_object
os.remove('test.fits')
def test_savefig_matplotlib_not_installed(self):
from ..io import savefig
try:
import matplotlib.pyplot as plt
except Exception as e:
# No light curve object is needed here; calling savefig() alone should
# raise the ImportError asserted below.
try:
savefig("test.png")
except Exception as e:
assert type(e) is ImportError
assert str(e) == "Matplotlib required for savefig()"
def test_savefig_without_plot(self):
import matplotlib.pyplot as plt
from ..io import savefig
plt.close("all")
with warnings.catch_warnings(record=True) as w:
savefig('test.png')
assert "plot the image first" in str(w[0].message)
os.unlink('test.png')
def test_savefig(self):
import matplotlib.pyplot as plt
from ..io import savefig
plt.plot([1, 2, 3])
savefig("test.png")
os.unlink("test.png")
|
|
from __future__ import print_function
import random
import consts
import itertools
import heapq
class GenericWormBot:
def __init__(self, bot_id, initial_position=(0,0)):
self.bot_id = bot_id
x,y = initial_position
self.body_parts = [{'x':x, 'y':y}]
self.FAILED = False
self.FAILURE_REASON = "Hasn't Failed"
self.last_history = []
@classmethod
def new_instance(cls, bot_id, starting_position):
bot_id = "<{}>.{}".format(cls.__name__, bot_id)
return cls(bot_id, starting_position)
def act(self, game, bots):
t = self.think(game, bots)
print(t)
move_x, move_y = t
new_head_x = self.body_parts[consts.HEAD]['x'] + move_x
new_head_y = self.body_parts[consts.HEAD]['y'] + move_y
self.body_parts.insert(consts.HEAD, {'x':new_head_x, 'y':new_head_y})
def delete_tail(self):
cell = self.body_parts[-1]
del self.body_parts[-1]
return cell
def think(self, game, bots):
raise NotImplementedError
@property
def head(self):
return self.body_parts[consts.HEAD]
def _single_collision(self, part):
head = self.head
return head['x'] == part['x'] and head['y'] == part['y']
def _collision_helper(self, body):
return any([self._single_collision(part) for part in body])
def self_collision(self):
return self._collision_helper(self.body_parts[1:])
def other_collision(self, bots):
for bot in bots:
if bot.bot_id == self.bot_id:
continue
if self._collision_helper(bot.body_parts):
return True
return False
def failed(self, reason):
self.FAILED = True
self.FAILURE_REASON = reason
def bad_move(self, new_coords, game=None):
if game:
if new_coords[0] < 0 or new_coords[1] < 0:
return True
if new_coords[0] > game.right_edge or new_coords[1] > game.bottom_edge:
return True
if len(self.body_parts) > 1:
for part in self.body_parts[1:]:
if part['x'] == new_coords[0] and part['y'] == new_coords[1]:
return True
return False
class PriorityQueue:
def __init__(self):
self.items = []
def push(self, item):
heapq.heappush(self.items, item)
def push_many(self, items):
for item in items:
self.push(item)
def pop(self):
return heapq.heappop(self.items)
def not_empty(self):
return len(self.items) > 0
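# The queue relies on heapq's lexicographic tuple ordering: the first element
# of each pushed tuple acts as the priority, so astar() below pushes tuples
# whose first entry is the A* f-value and pop() always returns the lowest one.
# Minimal sketch (values illustrative only):
#     pq = PriorityQueue()
#     pq.push((3, 'c')); pq.push((1, 'a')); pq.push((2, 'b'))
#     pq.pop()  # -> (1, 'a')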
class AwesomeBot(GenericWormBot):
def calc_dist(self, coord1, coord2):
## coord1 = (x1,y1)
## coord2 = (x2,y2)
xsq = (coord1[0]-coord2[0])**2
ysq = (coord1[1]-coord2[1])**2
return (xsq + ysq)**0.5
def apply_move(self, move, xy):
## move = (-1,0)
## xy = (curx, cury)
new_coord = (move[0]+xy[0], move[1]+xy[1])
return new_coord
def apply_moves(self, xy, foodxy, game):
all_moves = []
for move_name, move_value in consts.MOVES.items():
new_coord = self.apply_move(move_value, xy)
move_dist = self.calc_dist(new_coord, foodxy)
if not self.bad_move(new_coord, game):
all_moves.append((move_dist, new_coord, move_value))
return all_moves
def apply_moves_bare(self, xy, game):
all_moves = []
for move_name, move_value in consts.MOVES.items():
new_coord = self.apply_move(move_value, xy)
if not self.bad_move(new_coord, game):
all_moves.append((new_coord, move_value))
return all_moves
def think(self, game, bots):
n = len(self.last_history)
# Periodically replanning (the commented-out condition below) made the bot
# erratic, so follow the cached path until it is exhausted instead.
#if n > 0 and n % 20 != 0:
if n > 0:
return self.last_history.pop()
head = self.body_parts[consts.HEAD]
curxy = (head['x'], head['y'])
move = astar(game, self)
return move
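# astar() below keeps frontier entries as 5-tuples:
#   (f_value, distance_to_food, coordinate, move_applied, path_length)
# with f_value = distance_to_food + path_length, so PriorityQueue always pops
# the most promising coordinate first. came_from maps each entry to its parent,
# letting get_best() walk the chain back toward the head and cache the moves in
# bot.last_history; think() pops from the end of that list, so the next move
# executed is the one closest to the worm's current position.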
def astar(game, bot):
foodxy = (game.food['x'], game.food['y'])
head = bot.body_parts[consts.HEAD]
curxy = (head['x'], head['y'])
starting_point = (0, 0, curxy, None, 0)
frontier = PriorityQueue()
frontier.push(starting_point)
came_from = dict()
graveyard = set()
best_move = None
while frontier.not_empty():
astarval, move_dist, next_move, move_value, move_num = frontier.pop()
if next_move == foodxy:
best_move = (astarval, move_dist, next_move, move_value, move_num)
break
moves = bot.apply_moves(next_move, foodxy, game)
for move in moves:
move = move + (move_num+1, )
move_coord = move[1]
next_astarval = move[0] + move[-1]
move = (next_astarval, ) + move
if move_coord not in graveyard:
came_from[move] = (astarval, move_dist, next_move, move_value, move_num)
frontier.push(move)
graveyard.add(move_coord)
if best_move is None:
return astar_dontdie(game, bot)
bot.last_version = "normal"
#astarval, move_dist, next_move, move_value, move_num = \
get_best(bot, best_move, came_from, pos=3)
return bot.last_history.pop()
def astar_dontdie(game, bot):
print('in dont die')
head = bot.body_parts[consts.HEAD]
curxy = (head['x'], head['y'])
butt = bot.body_parts[-1]
buttxy = (butt['x'], butt['y'])
starting_point = (0, curxy, None)
frontier = PriorityQueue()
frontier.push(starting_point)
came_from = dict()
graveyard = set()
best_move = None
while frontier.not_empty():
move_num, next_move, move_value = frontier.pop()
moves = bot.apply_moves_bare(next_move, game)
for move in moves:
move = (move_num-1, move[0], move[1])
move_coord = move[1]
if move_coord not in graveyard:
came_from[move] = (move_num, next_move, move_value)
frontier.push(move)
graveyard.add(move_coord)
if best_move is None:
print("this is none")
if len(came_from) == 0:
print('we are screwed and dead')
return list(consts.MOVES.values())[1]
best_move = min(came_from.items(), key=lambda x: x[0][0])[1]
bot.last_version = "dontdie"
get_best(bot, best_move, came_from, -1, 2)
return bot.last_history.pop()
def get_best(bot, best_move, came_from, mod=1, pos=1):
bot.last_history = []
justincase = 0
print(pos)
bot.last_history.append(best_move[pos])
while mod*came_from[best_move][0] > 0 and justincase < 10**4:
best_move = came_from[best_move]
justincase += 1
bot.last_history.append(best_move[pos])
|
|
import datetime
import logging
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils import timezone
from celery import shared_task
import mandrill
import pytz
from .helpers import get_domain_name
from .models import Team, Membership
logger = logging.getLogger('put')
@shared_task
def schedule_reminders():
"""
Schedule sending of reminders to each member in an active `Team`.
A team is considered active if its `digest_days_sent` field is non-empty
and it has at least 1 active member.
Sends email reminders on each day listed in `Team.digest_days_sent`, at the
team's `send_reminders_at` time.
"""
has_days_sent_data = ~Q(digest_days_sent__len=0)
active_teams = (
Team.objects.filter(has_days_sent_data,
memberships__is_active__gt=0,
is_active=True)
.distinct()
)
for team in active_teams:
ph_tz = pytz.timezone('Asia/Manila')
today = timezone.now().astimezone(ph_tz)
if today.weekday() in team.digest_days_sent:
reminders_eta = today.replace(
hour=team.send_reminders_at.hour,
minute=team.send_reminders_at.minute,
)
send_reminders.apply_async(
(team.id,),
eta=reminders_eta,
)
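# Note: apply_async(eta=...) enqueues the task right away but the worker holds
# it until the given datetime, so this scheduler (and schedule_digest() below)
# is presumably run early each day, e.g. by celery beat, before any team's
# send_reminders_at / send_digest_at time.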
@shared_task
def remind_team_member(membership_id, previous_todos=None, previous_blockers=None):
"""
Sends an individual reminder to a user.
Includes TODOs and blockers if provided.
"""
try:
membership = Membership.objects.get(id=membership_id, is_active=True)
except Membership.DoesNotExist:
logger.error(
"Active Membership with %s ID does not exist." % membership_id)
return
subject = 'What did you get done today?'
from_email = 'Digestus Reminder <{email}>'.format(email=membership.team.email)
recipient = [
'{name} <{email}>'.format(name=membership.user.get_full_name(),
email=membership.user.email)
]
context = {
'team_email': membership.team.email,
'team_name': membership.team.name,
'previous_todos': previous_todos,
'previous_blockers': previous_blockers,
'domain': get_domain_name(),
}
text_body = render_to_string('updates/emails/reminder.txt', context)
email_msg = EmailMultiAlternatives(
subject=subject,
body=text_body,
from_email=from_email,
to=recipient,
)
email_msg.subaccount = membership.team.subaccount_id
try:
email_msg.send()
except Exception as e:
logger.exception('Failed to send team member reminder. Retrying in 5 minutes.')
remind_team_member.retry(
args=[membership_id, previous_todos, previous_blockers],
exc=e,
countdown=300,
max_retries=5,
)
@shared_task
def send_reminders(team_id):
"""
Sends reminder emails to all members of the team if:
1. The team has one or more active members
2. The team has a valid Mandrill subaccount
"""
try:
team = Team.objects.get(id=team_id, is_active=True)
except Team.DoesNotExist:
logger.error(
"Active team with %s ID does not exist." % team_id)
return
# Team should have members
if not team.memberships.filter(is_active=True):
logger.error(
"Active team %s has no active members. Sending of reminders aborted" % team.name)
return
# Team should have a valid Mandrill subaccount
mc = mandrill.Mandrill(settings.MANDRILL_API_KEY)
try:
mc.subaccounts.info(id=team.subaccount_id)
except Exception:
logger.exception(
"Active team %s has an invalid subaccount. Sending of reminders aborted" % team.name)
return
today = timezone.now()
for membership in team.memberships.filter(is_active=True):
update = membership.updates.filter(
for_date__year=today.year,
for_date__month=today.month,
for_date__day=today.day,
).first()
if update and (update.will_do or update.blocker):
remind_team_member.delay(
membership.id,
update.will_do_as_list(),
update.blocker_as_list(),
)
else:
remind_team_member.delay(membership.id)
@shared_task
def schedule_digest():
"""
Schedule sending of digests to all active members and silent recipients in an active `Team`.
"""
has_days_sent_data = ~Q(digest_days_sent__len=0)
active_teams = (
Team.objects.filter(has_days_sent_data,
is_active=True,
memberships__is_active__gt=0)
.distinct()
)
for team in active_teams:
ph_tz = pytz.timezone('Asia/Manila')
today = timezone.now().astimezone(ph_tz)
if today.weekday() in team.digest_days_sent:
digest_eta = today.replace(
hour=team.send_digest_at.hour,
minute=team.send_digest_at.minute,
)
send_digest.apply_async(
(team.id, digest_eta.astimezone(pytz.UTC)),
eta=digest_eta,
)
# TODO: test for project managers early updates
# Send digest an hour before to Project Managers
pm_digest_eta = digest_eta - datetime.timedelta(hours=1)
send_digest.apply_async(
(team.id, digest_eta.astimezone(pytz.UTC), True),
eta=pm_digest_eta,
)
@shared_task
def send_digest(team_id, for_date, for_project_managers=False):
"""
Sends digest for the given date to all active members and silent
recipients of the team.
Arguments:
`team`: `Team` object
`for_date`: A `datetime.datetime` instance in UTC
`for_project_managers`: Boolean; whether to send only to Project Manager members
"""
# TODO: create decorator for this repeating pattern: try...except
try:
team = Team.objects.get(id=team_id, is_active=True)
except Team.DoesNotExist:
logger.exception(
"Active team with %s ID does not exist." % team_id)
return
team_updates = team.get_updates(for_date)
if team_updates:
ph_tz = pytz.timezone('Asia/Manila')
update_for_date = for_date.astimezone(ph_tz).strftime('%a, %b %d %Y')
context = {
'members_and_updates': team_updates,
'team': team,
'date': update_for_date,
'domain': get_domain_name(),
}
text_body = render_to_string('updates/emails/digest.txt', context)
html_body = render_to_string('updates/emails/digest.html', context)
# Prepare email
from_email = 'Digestus Digest <{email}>'.format(email=team.email)
subject = 'Digest for {team} for {date}'.format(team=team.name,
date=update_for_date)
msg = EmailMultiAlternatives(
subject=subject,
body=text_body,
from_email=from_email,
to=team.get_recipients(for_project_managers),
)
msg.preserve_recipients = True
msg.auto_text = False
msg.auto_html = False
msg.attach_alternative(html_body, 'text/html')
msg.content_subtype = 'html'
msg.subaccount = team.subaccount_id
try:
msg.send()
except Exception as e:
logger.exception(
'Digest sending failed for team with ID: %s. Retrying in 5 minutes.' % team_id)
send_digest.retry(
args=[team_id, for_date, for_project_managers],
exc=e,
countdown=300,
max_retries=5,
)
else:
error_msg = 'Team %s has no active members. Sending of digest aborted.' % team.name
logger.error(error_msg)
@shared_task
def wrong_email_format_reply(inbound_email, from_email, email_text):
subject = "FORMAT ERROR!!"
context = {
'email_text': email_text
}
text_body = render_to_string('updates/emails/auto_reply.txt', context)
auto_reply = EmailMultiAlternatives(
subject=subject,
body=text_body,
from_email=inbound_email,
to=[from_email, ]
)
auto_reply.send()
|
|
"""Library of Region objects I use in my research"""
from aospy.region import Region
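# Conventions assumed throughout this library: latitude/longitude bounds are in
# degrees with longitudes on a 0-360 grid, and do_land_mask takes the values
# used below (False, True, 'ocean', 'strict_ocean') to control whether and how
# land or ocean points are masked out when averaging over a region.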
china_west = Region(
name='china_west',
description='Western China',
lat_bounds=(35, 45),
lon_bounds=(75, 100),
do_land_mask=False
)
china_east = Region(
name='china_east',
description='Eastern China',
lat_bounds=(22, 32),
lon_bounds=(105, 120),
do_land_mask=True
)
globe = Region(
name='globe',
description='Entire globe',
lat_bounds=(-90, 90),
lon_bounds=(0, 360),
do_land_mask=False
)
land = Region(
name='land',
description='Land',
lat_bounds=(-90, 90),
lon_bounds=(0, 360),
do_land_mask=True
)
ocean = Region(
name='ocean',
description='Ocean',
lat_bounds=(-90, 90),
lon_bounds=(0, 360),
do_land_mask='ocean'
)
nh = Region(
name='nh',
description='Northern hemisphere',
lat_bounds=(0, 90),
lon_bounds=(0, 360),
do_land_mask=False
)
sh = Region(
name='sh',
description='Southern hemisphere',
lat_bounds=(-90, 0),
lon_bounds=(0, 360),
do_land_mask=False
)
eh = Region(
name='eh',
description='Eastern hemisphere',
lat_bounds=(-90, 90),
lon_bounds=(0, 180),
do_land_mask=False
)
wh = Region(
name='wh',
description='Western hemisphere',
lat_bounds=(-90, 90),
lon_bounds=(180, 360),
do_land_mask=False
)
tropics = Region(
name='tropics',
description='Tropics (30S-30N)',
lat_bounds=(-30, 30),
lon_bounds=(0, 360),
do_land_mask=False
)
trop_land = Region(
name='tropics_land',
description='All land 30S-30N',
lat_bounds=(-30, 30),
lon_bounds=(0, 360),
do_land_mask=True
)
trop_ocean = Region(
description='All ocean 30S-30N',
name='tropics_ocean',
lat_bounds=(-30, 30),
lon_bounds=(0, 360),
do_land_mask='ocean'
)
deep_tropics = Region(
name='deep_tropics',
description='Deep tropics (10S-10N)',
lat_bounds=(-10, 10),
lon_bounds=(0, 360),
do_land_mask=False
)
atlantic = Region(
name='atlantic',
description='Atlantic Ocean',
do_land_mask='ocean',
# atlantic.mask_bounds=[((-90, 90), (0, 25)), ((-90, 90), (290, 360)),
# # Atlantic 1
# ((xlat(j) ge -90. and (xlon(i) gt 290. or xlon(i) lt 25.)) or $
# (xlat(j) gt 0. and xlat(j) lt 20. and ((xlon(i)+xlat(j)) gt 290.)) or $
# (xlat(j) le 65. and xlat(j) gt 15 and (xlon(i) gt 260. or xlon(i) lt 50.)) or $
# (xlat(j) gt 65.))
# # Atlantic 2
# ((xlat(j) ge -90. and (xlon(i) gt 290. or xlon(i) lt 25.)) or $
# (xlat(j) gt 0. and xlat(j) lt 20. and ((xlon(i)+xlat(j)) gt 290.)) or $
# (xlat(j) le 65. and xlat(j) gt 15 and (xlon(i) gt 260. or xlon(i) lt 50.)) or $
# (xlat(j) gt 65.))
# # Indian
# (xlon(i) le 100.5 or (xlon(i) gt 100.5 and xlon(i) lt 128.5 $
# and (28.*(xlat(j)+14.5)+14.*(xlon(i)-100.5)) le 14.*28.) $
# or (xlon(i) lt 145.5 and xlat(j) lt -29.5))
)
sahel = Region(
name='sahel',
description='African Sahel',
mask_bounds=[((10, 20), (0, 40)), ((10, 20), (342, 360))],
do_land_mask=True
)
sahel2 = Region(
name='sahel2',
description='African Sahel w/ longitude bounds 15W-30E',
mask_bounds=[((10, 20), (0, 30)), ((10, 20), (345, 360))],
do_land_mask=True
)
sahel3 = Region(
name='sahel3',
description=('Western part of African Sahel. Used by some to '
'specify the whole Sahel.'),
mask_bounds=[((10, 20), (0, 10)), ((10, 20), (340, 360))],
do_land_mask=False
)
sahel_north = Region(
name='sahel_north',
description='Northern half of African Sahel',
mask_bounds=[((15, 20), (0, 40)), ((15, 20), (342, 360))],
do_land_mask=True
)
sahel_south = Region(
name='sahel_south',
description='Southern half of African Sahel',
mask_bounds=[((10, 15), (0, 40)), ((10, 15), (342, 360))],
do_land_mask=True
)
sahel_west = Region(
name='sahel_west',
description='Western half of African Sahel',
mask_bounds=[((10, 20), (0, 11)), ((10, 20), (342, 360))],
do_land_mask=True
)
sahel_east = Region(
name='sahel_east',
description='Eastern half of African Sahel',
lat_bounds=(10, 20),
lon_bounds=(11, 40),
do_land_mask=True
)
sahara = Region(
name='sahara',
description='African Sahara, as defined by Biasutti et al 2009',
mask_bounds=[((20, 30), (0, 35)), ((20, 30), (350, 360))],
do_land_mask=True
)
ind_monsoon = Region(
name='ind_monsoon',
description='Indian monsoon',
lat_bounds=(10, 30),
lon_bounds=(60, 100),
do_land_mask=False
)
warm_pool = Region(
name='warm_pool',
description='Indo-Pacific warm pool. Ocean mask',
lat_bounds=(-20, 20),
lon_bounds=(60, 180),
do_land_mask='ocean'
)
wpwp = Region(
name='wpwp',
description='West Pacific Warm Pool',
lat_bounds=(-5, 5),
lon_bounds=(80, 160),
do_land_mask=False
)
epac = Region(
name='epac',
description='East Pacific cold tongue',
lat_bounds=(-5, 5),
lon_bounds=(200, 280),
do_land_mask=False
)
epac_watl = Region(
name='epac_watl',
description='East Pacific and West Atlantic, including C. and S. America',
lat_bounds=(0, 15),
lon_bounds=(240, 300),
do_land_mask=False
)
epac_itcz = Region(
name='epac_itcz',
description='East Pacific ITCZ for NH summer',
lat_bounds=(0, 20),
lon_bounds=(180, 250),
do_land_mask=False
)
atl_itcz = Region(
name='atl_itcz',
description='Atlantic ITCZ for NH summer',
lat_bounds=(0, 20),
lon_bounds=(300, 345),
do_land_mask=False
)
burls_wpac = Region(
name='burls_wpac',
description='Equatorial W. Pacific region used by Burls and Fedorov 2014',
lat_bounds=(-8, 8),
lon_bounds=(130, 205),
do_land_mask=False
)
burls_epac = Region(
name='burls_epac',
description='Equatorial E. Pacific region used by Burls and Fedorov 2014',
lat_bounds=(-8, 8),
lon_bounds=(205, 280),
do_land_mask=False
)
burls_pac = Region(
name='burls_pac',
description='Pacific region used by Burls and Fedorov 2014',
mask_bounds=[(( 15, 65), (100, 260)),
(( 10, 15), (100, 275)),
(( -5, 10), (100, 290)),
((-65, -5), (130, 290))],
do_land_mask='strict_ocean'
)
burls_trop_pac = Region(
name='burls_trop_pac',
description='Tropical Pacific region used by Burls and Fedorov 2014',
mask_bounds=[(( -5, 8), (100, 290)),
(( -8, -5), (130, 290))],
do_land_mask='strict_ocean'
)
burls_ext_pac = Region(
name='burls_ext_pac',
description='Extratropical Pacific region used by Burls and Fedorov 2014',
mask_bounds=[(( 15, 65), (100, 260)),
(( 10, 15), (100, 275)),
(( 8, 10), (100, 290)),
((-65, -8), (130, 290))],
do_land_mask='strict_ocean'
)
nino1_2 = Region(
name='nino1_2',
    description='Standard Nino 1+2 region of equatorial E. Pacific',
lat_bounds=(-10, 0),
lon_bounds=(270, 280),
do_land_mask=False
)
nino3 = Region(
name='nino3',
description='Standard Nino 3 region of equatorial E. Pacific',
lat_bounds=(-5, 5),
lon_bounds=(210, 270),
do_land_mask=False
)
nino3_4 = Region(
name='nino3.4',
description='Standard Nino 3.4 region of equatorial E. Pacific',
lat_bounds=(-5, 5),
lon_bounds=(190, 240),
do_land_mask=False
)
nino4 = Region(
name='nino4',
description='Standard Nino 4 region of equatorial E. Pacific',
lat_bounds=(-5, 5),
lon_bounds=(160, 210),
do_land_mask=False
)
cld_seed_np = Region(
name='cld_seed_np',
description='North Pacific region of Hill & Ming 2012 GRL cloud brightening geoengineering study',
lat_bounds=(10, 30),
lon_bounds=(204, 244),
do_land_mask='ocean'
)
cld_seed_sp = Region(
name='cld_seed_sp',
description='South Pacific region of Hill & Ming 2012 GRL cloud brightening geoengineering study',
lat_bounds=(-30, -5),
lon_bounds=(240, 285),
do_land_mask='ocean'
)
cld_seed_sa = Region(
name='cld_seed_sa',
description='South Atlantic region of Hill & Ming 2012 GRL cloud brightening geoengineering study',
mask_bounds=[((-30, 5), (0, 12)),
((-30, 5), (342, 360))],
do_land_mask='ocean'
)
cld_seed_all = Region(
name='cld_seed_all',
description='All 3 regions from Hill & Ming 2012 GRL',
mask_bounds=[((-30, 5), (0, 12)),
((-30, 5), (342, 360)),
((-30, -5), (240, 285)),
((10, 30), (204, 244))],
do_land_mask='ocean'
)
east_asia_monsoon = Region(
name='east_asia_monsoon',
description='East Asian Monsoon land region',
lat_bounds=(22.5, 40),
lon_bounds=(100, 122.5),
do_land_mask=False
)
extrop = Region(
name='extratropics',
description='Extratropics (poleward of 30S/N)',
mask_bounds=[((-90, -30), (0, 360)),
((30, 90), (0, 360))],
do_land_mask=False
)
nh_tropics = Region(
name='nh_tropics',
description='Northern hemisphere tropics: 0-30N',
lat_bounds=(0, 30),
lon_bounds=(0, 360),
do_land_mask=False
)
sh_tropics = Region(
name='sh_tropics',
description='Southern hemisphere tropics: 30S-0',
lat_bounds=(-30, 0),
lon_bounds=(0, 360),
do_land_mask=False
)
nh_land = Region(
name='nh_land',
description='Northern hemisphere land',
lat_bounds=(0, 90),
lon_bounds=(0, 360),
do_land_mask=True
)
nh_ocean = Region(
name='nh_ocean',
description='Northern hemisphere ocean',
lat_bounds=(0, 90),
lon_bounds=(0, 360),
do_land_mask='ocean'
)
sh_land = Region(
name='sh_land',
description='Southern hemisphere land',
lat_bounds=(-90, 0),
lon_bounds=(0, 360),
do_land_mask=True
)
sh_ocean = Region(
name='sh_ocean',
description='Southern hemisphere ocean',
lat_bounds=(-90, 0),
lon_bounds=(0, 360),
do_land_mask='ocean'
)
extratrop_land = Region(
name='extratrop_land',
description='Extratropical (poleward of 30S/N) land',
mask_bounds=[((-90, -30), (0, 360)),
((30, 90), (0, 360))],
do_land_mask=True
)
extratrop_ocean = Region(
name='extratrop_ocean',
description='Extratropical (poleward of 30S/N) ocean',
mask_bounds=[((-90, -30), (0, 360)),
((30, 90), (0, 360))],
do_land_mask='ocean'
)
nh_extratrop = Region(
name='nh_extratrop',
description='Northern hemisphere extratropics (30-90N)',
lat_bounds=(30, 90),
lon_bounds=(0, 360),
do_land_mask=False
)
sh_extratrop = Region(
name='sh_extratrop',
description='Southern hemisphere extratropics (90S-30S)',
lat_bounds=(-90, -30),
lon_bounds=(0, 360),
do_land_mask=False
)
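# Illustrative sketch (not part of aospy): regions such as `sahel` above use two
# mask_bounds spans only because longitudes run 0-360, so a region crossing the
# prime meridian has to be split in two.  On a regular lat/lon grid the spans
# combine into a single boolean mask like this:
def _example_region_mask(lats, lons, mask_bounds):
    """Return a 2D boolean array that is True inside any of the given spans."""
    import numpy as np
    lon2d, lat2d = np.meshgrid(lons, lats)
    mask = np.zeros(lat2d.shape, dtype=bool)
    for (lat_min, lat_max), (lon_min, lon_max) in mask_bounds:
        mask |= ((lat2d >= lat_min) & (lat2d <= lat_max) &
                 (lon2d >= lon_min) & (lon2d <= lon_max))
    return mask
# e.g. _example_region_mask(range(-90, 91), range(0, 360),
#                           [((10, 20), (0, 40)), ((10, 20), (342, 360))])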
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Sandesh Session
#
import socket
from vr_nldump.pysandesh.transport import TTransport
from vr_nldump.pysandesh.protocol import TXMLProtocol
from vr_nldump.pysandesh.work_queue import WorkQueue
from vr_nldump.pysandesh.tcp_session import TcpSession
from vr_nldump.pysandesh.sandesh_logger import SandeshLogger
_XML_SANDESH_OPEN = '<sandesh length="0000000000">'
_XML_SANDESH_OPEN_ATTR_LEN = '<sandesh length="'
_XML_SANDESH_OPEN_END = '">'
_XML_SANDESH_CLOSE = '</sandesh>'
class SandeshReader(object):
_READ_OK = 0
_READ_ERR = -1
def __init__(self, session, sandesh_msg_handler):
self._session = session
self._sandesh_instance = session.sandesh_instance()
self._sandesh_msg_handler = sandesh_msg_handler
self._read_buf = ''
self._sandesh_len = 0
self._logger = session._logger
# end __init__
# Public functions
def read_msg(self, rcv_buf):
self._read_buf += rcv_buf
while True:
(ret, sandesh) = self._extract_sandesh()
if ret < 0:
self._logger.error('Failed to extract sandesh')
return self._READ_ERR
if not sandesh:
# read more data
self._logger.debug(
'Not enough data to extract sandesh. Read more data.')
break
# Call sandesh message handler
self._sandesh_msg_handler(self._session, sandesh)
self._read_buf = self._read_buf[self._sandesh_len:]
self._sandesh_len = 0
if not len(self._read_buf):
break
return self._READ_OK
# end read_msg
@staticmethod
def extract_sandesh_header(sandesh_xml):
transport = TTransport.TMemoryBuffer(sandesh_xml)
protocol_factory = TXMLProtocol.TXMLProtocolFactory()
protocol = protocol_factory.getProtocol(transport)
from gen_py.sandesh.ttypes import SandeshHeader
hdr = SandeshHeader()
hdr_len = hdr.read(protocol)
if hdr_len == -1:
return (None, 0, None)
# Extract the sandesh name
(length, sandesh_name) = protocol.readSandeshBegin()
if length == -1:
return (hdr, hdr_len, None)
return (hdr, hdr_len, sandesh_name)
# end extract_sandesh_header
# Private functions
def _extract_sandesh(self):
if not self._sandesh_len:
(ret, length) = self._extract_sandesh_len()
if ret < 0:
return (self._READ_ERR, None)
elif not length:
return (self._READ_OK, None)
self._sandesh_len = length
if len(self._read_buf) < self._sandesh_len:
return (self._READ_OK, None)
# Sanity check
sandesh_close_tag = self._read_buf[
self._sandesh_len - len(_XML_SANDESH_CLOSE):self._sandesh_len]
if sandesh_close_tag != _XML_SANDESH_CLOSE:
return (self._READ_ERR, None)
# Extract sandesh
sandesh_begin = len(_XML_SANDESH_OPEN)
sandesh_end = self._sandesh_len - len(_XML_SANDESH_CLOSE)
sandesh = self._read_buf[sandesh_begin:sandesh_end]
return (self._READ_OK, sandesh)
# end _extract_sandesh
def _extract_sandesh_len(self):
# Do we have enough data to extract the sandesh length?
if len(self._read_buf) < len(_XML_SANDESH_OPEN):
self._logger.debug('Not enough data to extract sandesh length')
return (self._READ_OK, 0)
# Sanity checks
if self._read_buf[:len(_XML_SANDESH_OPEN_ATTR_LEN)] != \
_XML_SANDESH_OPEN_ATTR_LEN:
return (self._READ_ERR, 0)
if self._read_buf[len(_XML_SANDESH_OPEN) - len(_XML_SANDESH_OPEN_END):
len(_XML_SANDESH_OPEN)] != _XML_SANDESH_OPEN_END:
return (self._READ_ERR, 0)
len_str = self._read_buf[len(_XML_SANDESH_OPEN_ATTR_LEN):
len(_XML_SANDESH_OPEN) -
len(_XML_SANDESH_OPEN_END)]
try:
length = int(len_str)
except ValueError:
self._logger.error(
'Invalid sandesh length [%s] in the received message' %
(len_str))
return (self._READ_ERR, 0)
self._logger.debug('Extracted sandesh length: %s' % (len_str))
return (self._READ_OK, length)
# end _extract_sandesh_len
# end class SandeshReader
class SandeshWriter(object):
_MAX_SEND_BUF_SIZE = 4096
def __init__(self, session):
self._session = session
self._sandesh_instance = session.sandesh_instance()
self._send_buf_cache = ''
self._logger = session._logger
# end __init__
# Public functions
@staticmethod
def encode_sandesh(sandesh):
transport = TTransport.TMemoryBuffer()
protocol_factory = TXMLProtocol.TXMLProtocolFactory()
protocol = protocol_factory.getProtocol(transport)
from gen_py.sandesh.ttypes import SandeshHeader
sandesh_hdr = SandeshHeader(sandesh.scope(),
sandesh.timestamp(),
sandesh.module(),
sandesh.source_id(),
sandesh.context(),
sandesh.seqnum(),
sandesh.versionsig(),
sandesh.type(),
sandesh.hints(),
sandesh.level(),
sandesh.category(),
sandesh.node_type(),
sandesh.instance_id())
# write the sandesh header
if sandesh_hdr.write(protocol) < 0:
print 'Error in encoding sandesh header'
return None
# write the sandesh
if sandesh.write(protocol) < 0:
print 'Error in encoding sandesh'
return None
# get the message
msg = transport.getvalue()
# calculate the message length
msg_len = len(_XML_SANDESH_OPEN) + len(msg) + len(_XML_SANDESH_CLOSE)
len_width = len(_XML_SANDESH_OPEN) - \
(len(_XML_SANDESH_OPEN_ATTR_LEN) + len(_XML_SANDESH_OPEN_END))
# pad the length with leading 0s
len_str = (str(msg_len)).zfill(len_width)
encoded_buf = _XML_SANDESH_OPEN_ATTR_LEN + len_str + \
_XML_SANDESH_OPEN_END + msg + _XML_SANDESH_CLOSE
return encoded_buf
# end encode_sandesh
def send_msg(self, sandesh, more):
send_buf = self.encode_sandesh(sandesh)
if send_buf is None:
self._logger.error('Failed to send sandesh')
return -1
# update sandesh tx stats
sandesh_name = sandesh.__class__.__name__
self._sandesh_instance.stats().update_stats(
sandesh_name, len(send_buf), True)
if more:
self._send_msg_more(send_buf)
else:
self._send_msg_all(send_buf)
return 0
# end send_msg
# Private functions
def _send_msg_more(self, send_buf):
self._send_buf_cache += send_buf
if len(self._send_buf_cache) >= self._MAX_SEND_BUF_SIZE:
# send the message
self._send(self._send_buf_cache)
# reset the cache
self._send_buf_cache = ''
# end _send_msg_more
def _send_msg_all(self, send_buf):
# send the message
self._send(self._send_buf_cache + send_buf)
# reset the cache
self._send_buf_cache = ''
# end _send_msg_all
def _send(self, send_buf):
if self._session.write(send_buf) < 0:
self._logger.error('Error sending message')
# end _send
# end class SandeshWriter
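# A minimal sketch (not part of the original module) of the envelope format
# produced by SandeshWriter.encode_sandesh and parsed by SandeshReader: the
# zero-padded length attribute counts the whole envelope, including the open
# and close tags themselves.
def _example_frame(payload):
    """Sketch only: frame `payload` exactly the way encode_sandesh does."""
    msg_len = len(_XML_SANDESH_OPEN) + len(payload) + len(_XML_SANDESH_CLOSE)
    len_width = len(_XML_SANDESH_OPEN) - \
        (len(_XML_SANDESH_OPEN_ATTR_LEN) + len(_XML_SANDESH_OPEN_END))
    return (_XML_SANDESH_OPEN_ATTR_LEN + str(msg_len).zfill(len_width) +
            _XML_SANDESH_OPEN_END + payload + _XML_SANDESH_CLOSE)
# e.g. _example_frame('abc') == '<sandesh length="0000000042">abc</sandesh>'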
class SandeshSession(TcpSession):
_KEEPALIVE_IDLE_TIME = 45 # in secs
_KEEPALIVE_INTERVAL = 3 # in secs
_KEEPALIVE_PROBES = 5
def __init__(self, sandesh_instance, server, event_handler,
sandesh_msg_handler):
self._sandesh_instance = sandesh_instance
self._logger = sandesh_instance._logger
self._event_handler = event_handler
self._reader = SandeshReader(self, sandesh_msg_handler)
self._writer = SandeshWriter(self)
self._send_queue = WorkQueue(self._send_sandesh,
self._is_ready_to_send_sandesh)
TcpSession.__init__(self, server)
# end __init__
# Public functions
def sandesh_instance(self):
return self._sandesh_instance
# end sandesh_instance
def is_send_queue_empty(self):
return self._send_queue.is_queue_empty()
# end is_send_queue_empty
def is_connected(self):
return self._connected
# end is_connected
def enqueue_sandesh(self, sandesh):
self._send_queue.enqueue(sandesh)
# end enqueue_sandesh
def send_queue(self):
return self._send_queue
# end send_queue
# Overloaded functions from TcpSession
def connect(self):
TcpSession.connect(self, timeout=5)
# end connect
def _on_read(self, buf):
if self._reader.read_msg(buf) < 0:
self._logger.error('SandeshReader Error. Close Collector session')
self.close()
# end _on_read
def _handle_event(self, event):
self._event_handler(self, event)
# end _handle_event
def _set_socket_options(self):
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'TCP_KEEPIDLE'):
self._socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
self._KEEPALIVE_IDLE_TIME)
if hasattr(socket, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_KEEPALIVE,
self._KEEPALIVE_IDLE_TIME)
if hasattr(socket, 'TCP_KEEPINTVL'):
self._socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
self._KEEPALIVE_INTERVAL)
if hasattr(socket, 'TCP_KEEPCNT'):
self._socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_KEEPCNT, self._KEEPALIVE_PROBES)
# end _set_socket_options
# Private functions
def _send_sandesh(self, sandesh):
if self._send_queue.is_queue_empty():
more = False
else:
more = True
if not self._connected:
if self._sandesh_instance.is_logging_dropped_allowed(sandesh):
self._logger.error(
"SANDESH: %s: %s" % ("Not connected", sandesh.log()))
return
if sandesh.is_logging_allowed(self._sandesh_instance):
self._logger.log(
SandeshLogger.get_py_logger_level(sandesh.level()),
sandesh.log())
self._writer.send_msg(sandesh, more)
# end _send_sandesh
def _is_ready_to_send_sandesh(self):
return self._sandesh_instance.is_send_queue_enabled()
# end _is_ready_to_send_sandesh
# end class SandeshSession
|
|
"""Module containing wrappers for multiprocessing"""
__author__ = 'Robert Meyer', 'Mehmet Nevvaf Timur'
from threading import ThreadError
import queue
import pickle
try:
import zmq
except ImportError:
zmq = None
from collections import deque
import copy as cp
import gc
import sys
from threading import Thread
import time
import os
import socket
import pypet.pypetconstants as pypetconstants
from pypet.pypetlogging import HasLogger
from pypet.utils.decorators import retry
from pypet.utils.helpful_functions import is_ipv6
class MultiprocWrapper(object):
"""Abstract class definition of a Wrapper.
Note that only storing is required, loading is optional.
ABSTRACT: Needs to be defined in subclass
"""
@property
def is_open(self):
""" Normally the file is opened and closed after each insertion.
        However, the storage service may provide the option to keep the store open
        and signal this via this property.
"""
return False
@property
def multiproc_safe(self):
"""This wrapper guarantees multiprocessing safety"""
return True
def store(self, *args, **kwargs):
raise NotImplementedError('Implement this!')
class ZMQServer(HasLogger):
""" Generic zmq server """
PING = 'PING' # for connection testing
PONG = 'PONG' # for connection testing
DONE = 'DONE' # signals stopping of server
CLOSED = 'CLOSED' # signals closing of server
def __init__(self, url="tcp://127.0.0.1:7777"):
self._url = url # server url
self._set_logger()
self._context = None
self._socket = None
def _start(self):
self._logger.info('Starting Server at `%s`' % self._url)
self._context = zmq.Context()
self._socket = self._context.socket(zmq.REP)
self._socket.ipv6 = is_ipv6(self._url)
self._socket.bind(self._url)
def _close(self):
self._logger.info('Closing Server')
self._socket.close()
self._context.term()
class LockerServer(ZMQServer):
""" Manages a database of locks """
LOCK = 'LOCK' # command for locking a lock
RELEASE_ERROR = 'RELEASE_ERROR' # signals unsuccessful attempt to unlock
MSG_ERROR = 'MSG_ERROR' # signals error in decoding client request
UNLOCK = 'UNLOCK' # command for unlocking a lock
RELEASED = 'RELEASED' # signals successful unlocking
LOCK_ERROR = 'LOCK_ERROR' # signals unsuccessful attempt to lock
    GO = 'GO'  # signals successful locking and allows the client to continue
WAIT = 'WAIT' # signals lock is already in use and client has to wait for release
DELIMITER = ':::' # delimiter to split messages
DEFAULT_LOCK = '_DEFAULT_' # default lock name
def __init__(self, url="tcp://127.0.0.1:7777"):
super(LockerServer, self).__init__(url)
self._locks = {} # lock DB, format 'lock_name': ('client_id', 'request_id')
def _pre_respond_hook(self, response):
""" Hook that can be used to temper with the server before responding
:param response: Response to be send
:return: Boolean value if response should be send or not
"""
return True
def _lock(self, name, client_id, request_id):
"""Hanldes locking of locks
If a lock is already locked sends a WAIT command,
else LOCKs it and sends GO.
Complains if a given client re-locks a lock without releasing it before.
"""
if name in self._locks:
other_client_id, other_request_id = self._locks[name]
if other_client_id == client_id:
                response = (self.LOCK_ERROR + self.DELIMITER +
                            'Re-request of lock `%s` (old request id `%s`) by `%s` '
                            '(request id `%s`)' % (name, other_request_id, client_id, request_id))
self._logger.warning(response)
return response
else:
return self.WAIT
else:
self._locks[name] = (client_id, request_id)
return self.GO
def _unlock(self, name, client_id, request_id):
"""Handles unlocking
Complains if a non-existent lock should be released or
if a lock should be released that was acquired by
another client before.
"""
if name in self._locks:
other_client_id, other_request_id = self._locks[name]
if other_client_id != client_id:
response = (self.RELEASE_ERROR + self.DELIMITER +
'Lock `%s` was acquired by `%s` (old request id `%s`) and not by '
'`%s` (request id `%s`)' % (name,
other_client_id,
other_request_id,
client_id,
request_id))
self._logger.error(response)
return response
else:
del self._locks[name]
return self.RELEASED
else:
response = (self.RELEASE_ERROR + self.DELIMITER +
'Lock `%s` cannot be found in database (client id `%s`, '
'request id `%s`)' % (name, client_id, request_id))
self._logger.error(response)
return response
def run(self):
"""Runs server"""
try:
self._start()
running = True
while running:
msg = ''
name = ''
client_id = ''
request_id = ''
request = self._socket.recv_string()
                self._logger.log(1, 'Received REQ `%s`', request)
split_msg = request.split(self.DELIMITER)
if len(split_msg) == 4:
msg, name, client_id, request_id = split_msg
if msg == self.LOCK:
response = self._lock(name, client_id, request_id)
elif msg == self.UNLOCK:
response = self._unlock(name, client_id, request_id)
elif msg == self.PING:
response = self.PONG
elif msg == self.DONE:
response = self.CLOSED
running = False
else:
response = (self.MSG_ERROR + self.DELIMITER +
'Request `%s` not understood '
'(or wrong number of delimiters)' % request)
self._logger.error(response)
respond = self._pre_respond_hook(response)
if respond:
self._logger.log(1, 'Sending REP `%s` to `%s` (request id `%s`)',
response, client_id, request_id)
self._socket.send_string(response)
# Close everything in the end
self._close()
except Exception:
self._logger.exception('Crashed Lock Server!')
raise
class TimeOutLockerServer(LockerServer):
""" Lock Server where each lock is valid only for a fixed period of time. """
def __init__(self, url, timeout):
super(TimeOutLockerServer, self).__init__(url)
self._timeout = timeout
self._timeout_locks = {}
def _lock(self, name, client_id, request_id):
"""Handles locking
Locking time is stored to determine time out.
If a lock is timed out it can be acquired by a different client.
"""
if name in self._locks:
other_client_id, other_request_id, lock_time = self._locks[name]
if other_client_id == client_id:
                response = (self.LOCK_ERROR + self.DELIMITER +
                            'Re-request of lock `%s` (old request id `%s`) by `%s` '
                            '(request id `%s`)' % (name, other_request_id, client_id, request_id))
self._logger.warning(response)
return response
else:
current_time = time.time()
if current_time - lock_time < self._timeout:
return self.WAIT
else:
                    response = (self.GO + self.DELIMITER + 'Lock `%s` by `%s` (old request id `%s`) '
'timed out' % (name,
other_client_id,
other_request_id))
self._logger.info(response)
self._locks[name] = (client_id, request_id, time.time())
self._timeout_locks[(name, other_client_id)] = (request_id, lock_time)
return response
else:
self._locks[name] = (client_id, request_id, time.time())
return self.GO
def _unlock(self, name, client_id, request_id):
"""Handles unlocking"""
if name in self._locks:
other_client_id, other_request_id, lock_time = self._locks[name]
if other_client_id != client_id:
response = (self.RELEASE_ERROR + self.DELIMITER +
'Lock `%s` was acquired by `%s` (old request id `%s`) and not by '
'`%s` (request id `%s`)' % (name,
other_client_id,
other_request_id,
client_id,
request_id))
self._logger.error(response)
return response
else:
del self._locks[name]
return self.RELEASED
elif (name, client_id) in self._timeout_locks:
other_request_id, lock_time = self._timeout_locks[(name, client_id)]
timeout = time.time() - lock_time - self._timeout
response = (self.RELEASE_ERROR + self.DELIMITER +
'Lock `%s` timed out %f seconds ago (client id `%s`, '
'old request id `%s`)' % (name, timeout, client_id, other_request_id))
return response
else:
response = (self.RELEASE_ERROR + self.DELIMITER +
'Lock `%s` cannot be found in database (client id `%s`, '
'request id `%s`)' % (name, client_id, request_id))
self._logger.warning(response)
return response
class ReliableClient(HasLogger):
"""Implements a reliable client that reconnects on server failure"""
SLEEP = 0.01 # Sleep time before reconnect in seconds
RETRIES = 9 # Number of reconnect retries
    TIMEOUT = 2222  # Time to wait for a server response, in milliseconds (passed to Poller.poll)
def __init__(self, url):
self.url = url
self._context = None
self._socket = None
self._poll = None
self._set_logger()
def __getstate__(self):
result_dict = super(ReliableClient, self).__getstate__()
# Do not pickle zmq data
result_dict['_context'] = None
result_dict['_socket'] = None
result_dict['_poll'] = None
return result_dict
def send_done(self):
"""Notifies the Server to shutdown"""
self.start(test_connection=False)
self._logger.debug('Sending shutdown signal')
self._req_rep(ZMQServer.DONE)
def test_ping(self):
"""Connection test"""
self.start(test_connection=False)
response = self._req_rep(ZMQServer.PING)
if response != ZMQServer.PONG:
raise RuntimeError('Connection Error to LockServer')
def finalize(self):
"""Closes socket and terminates context
NO-OP if already closed.
"""
if self._context is not None:
if self._socket is not None:
self._close_socket(confused=False)
self._context.term()
self._context = None
self._poll = None
def start(self, test_connection=True):
"""Starts connection to server if not existent.
NO-OP if connection is already established.
Makes ping-pong test as well if desired.
"""
if self._context is None:
self._logger.debug('Starting Client')
self._context = zmq.Context()
self._poll = zmq.Poller()
self._start_socket()
if test_connection:
self.test_ping()
def _start_socket(self):
self._socket = self._context.socket(zmq.REQ)
self._socket.ipv6 = is_ipv6(self.url)
self._socket.connect(self.url)
self._poll.register(self._socket, zmq.POLLIN)
def _close_socket(self, confused=False):
if confused:
self._socket.setsockopt(zmq.LINGER, 0)
self._socket.close()
self._poll.unregister(self._socket)
self._socket = None
def __del__(self):
# For Python 3.4 to avoid dead-lock due to wrong object clearing
# i.e. deleting context before socket
self.finalize()
def _req_rep(self, request):
"""Returns server response on `request_sketch`"""
return self._req_rep_retry(request)[0]
def _req_rep_retry(self, request):
"""Returns response and number of retries"""
retries_left = self.RETRIES
while retries_left:
self._logger.log(1, 'Sending REQ `%s`', request)
self._send_request(request)
socks = dict(self._poll.poll(self.TIMEOUT))
if socks.get(self._socket) == zmq.POLLIN:
response = self._receive_response()
self._logger.log(1, 'Received REP `%s`', response)
return response, self.RETRIES - retries_left
else:
self._logger.debug('No response from server (%d retries left)' %
retries_left)
self._close_socket(confused=True)
retries_left -= 1
if retries_left == 0:
raise RuntimeError('Server seems to be offline!')
time.sleep(self.SLEEP)
self._start_socket()
def _send_request(self, request):
"""Actual sending of the request over network"""
self._socket.send_string(request)
def _receive_response(self):
"""Actual receiving of response"""
return self._socket.recv_string()
class LockerClient(ReliableClient):
""" Implements a Lock by requesting lock information from LockServer"""
def __init__(self, url='tcp://127.0.0.1:7777', lock_name=LockerServer.DEFAULT_LOCK):
super(LockerClient, self).__init__(url)
self.lock_name = lock_name
self.id = None
def __getstate__(self):
result_dict = super(LockerClient, self).__getstate__()
result_dict['id'] = None
return result_dict
def start(self, test_connection=True):
if self._context is None:
self.id = self._get_id()
cls = self.__class__
self._set_logger('%s.%s_%s' % (cls.__module__, cls.__name__, self.id))
super(LockerClient, self).start(test_connection)
@staticmethod
def _get_id():
return socket.getfqdn().replace(LockerServer.DELIMITER, '-') + '__' + str(os.getpid())
@staticmethod
def _get_request_id():
return str(time.time()).replace(LockerServer.DELIMITER, '-')
def _compose_request(self, request_sketch):
request = (request_sketch + LockerServer.DELIMITER +
self.lock_name + LockerServer.DELIMITER + self.id +
LockerServer.DELIMITER + self._get_request_id())
return request
def acquire(self):
"""Acquires lock and returns `True`
Blocks until lock is available.
"""
self.start(test_connection=False)
while True:
str_response, retries = self._req_rep_retry(LockerServer.LOCK)
response = str_response.split(LockerServer.DELIMITER)
if response[0] == LockerServer.GO:
return True
elif response[0] == LockerServer.LOCK_ERROR and retries > 0:
# Message was sent but Server response was lost and we tried again
self._logger.error(str_response + '; Probably due to retry')
return True
elif response[0] == LockerServer.WAIT:
time.sleep(self.SLEEP)
else:
raise RuntimeError('Response `%s` not understood' % response)
def release(self):
"""Releases lock"""
# self.start(test_connection=False)
str_response, retries = self._req_rep_retry(LockerServer.UNLOCK)
response = str_response.split(LockerServer.DELIMITER)
if response[0] == LockerServer.RELEASED:
pass # Everything is fine
elif response[0] == LockerServer.RELEASE_ERROR and retries > 0:
# Message was sent but Server response was lost and we tried again
self._logger.error(str_response + '; Probably due to retry')
else:
raise RuntimeError('Response `%s` not understood' % response)
def _req_rep_retry(self, request):
request = self._compose_request(request)
return super(LockerClient, self)._req_rep_retry(request)
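# Minimal usage sketch (illustrative url and lock name; assumes zmq is available
# and nothing else is bound to the port).  The LockerServer normally runs in its
# own thread or process, and LockerClient then serialises a critical section:
def _example_locking(url='tcp://127.0.0.1:7777'):
    """Sketch only: run a LockerServer in a thread and guard a critical section."""
    Thread(target=LockerServer(url).run).start()
    lock = LockerClient(url, lock_name='example_lock')
    lock.acquire()            # blocks (WAIT/sleep loop) until the server replies GO
    try:
        pass                  # ... critical section ...
    finally:
        lock.release()
    lock.send_done()          # ask the server to shut down
    lock.finalize()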
class QueuingServerMessageListener(ZMQServer):
""" Manages the listening requests"""
SPACE = 'SPACE' # for space in the queue
DATA = 'DATA' # for sending data
SPACE_AVAILABLE = 'SPACE_AVAILABLE'
SPACE_NOT_AVAILABLE = 'SPACE_NOT_AVAILABLE'
STORING = 'STORING'
def __init__(self, url, queue, queue_maxsize):
super(QueuingServerMessageListener, self).__init__(url)
self.queue = queue
if queue_maxsize == 0:
queue_maxsize = float('inf')
self.queue_maxsize = queue_maxsize
def listen(self):
""" Handles listening requests from the client.
There are 4 types of requests:
1- Check space in the queue
2- Tests the socket
3- If there is a space, it sends data
4- after data is sent, puts it to queue for storing
"""
count = 0
self._start()
while True:
result = self._socket.recv_pyobj()
if isinstance(result, tuple):
request, data = result
else:
request = result
data = None
if request == self.SPACE:
if self.queue.qsize() + count < self.queue_maxsize:
self._socket.send_string(self.SPACE_AVAILABLE)
count += 1
else:
self._socket.send_string(self.SPACE_NOT_AVAILABLE)
elif request == self.PING:
self._socket.send_string(self.PONG)
elif request == self.DATA:
self._socket.send_string(self.STORING)
self.queue.put(data)
count -= 1
elif request == self.DONE:
self._socket.send_string(ZMQServer.CLOSED)
self.queue.put(('DONE', [], {}))
self._close()
break
else:
raise RuntimeError('I did not understand your request %s' % request)
class QueuingServer(HasLogger):
""" Implements server architecture for Queueing"""
def __init__(self, url, storage_service, queue_maxsize, gc_interval):
self._url = url
self._storage_service = storage_service
self._queue_maxsize = queue_maxsize
self._gc_interval = gc_interval
def run(self):
main_queue = queue.Queue(maxsize=self._queue_maxsize)
server_message_listener = QueuingServerMessageListener(self._url, main_queue, self._queue_maxsize)
storage_writer = QueueStorageServiceWriter(self._storage_service, main_queue, self._gc_interval)
server_queue = Thread(target=server_message_listener.listen, args=())
server_queue.start()
storage_writer.run()
server_queue.join()
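# Minimal wiring sketch (illustrative url; `storage_service` is assumed to
# provide store()/is_open): QueuingServer bundles the zmq listener thread with a
# QueueStorageServiceWriter, while workers push data through QueuingClient.put()
# (see the client class below).
def _example_queuing_server(storage_service, url='tcp://127.0.0.1:22334'):
    """Sketch only: run the queuing server; blocks until a DONE request arrives."""
    QueuingServer(url, storage_service, queue_maxsize=100, gc_interval=10).run()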
class QueuingClient(ReliableClient):
""" Manages the returning requests"""
def put(self, data, block=True):
""" If there is space it sends data to server
If no space in the queue
It returns the request in every 10 millisecond
until there will be space in the queue.
"""
self.start(test_connection=False)
while True:
response = self._req_rep(QueuingServerMessageListener.SPACE)
if response == QueuingServerMessageListener.SPACE_AVAILABLE:
self._req_rep((QueuingServerMessageListener.DATA, data))
break
else:
time.sleep(0.01)
def _send_request(self, request):
return self._socket.send_pyobj(request)
class ForkDetector(HasLogger):
def _detect_fork(self):
"""Detects if lock client was forked.
Forking is detected by comparing the PID of the current
process with the stored PID.
"""
if self._pid is None:
self._pid = os.getpid()
if self._context is not None:
current_pid = os.getpid()
if current_pid != self._pid:
self._logger.debug('Fork detected: My pid `%s` != os pid `%s`. '
'Restarting connection.' % (str(self._pid), str(current_pid)))
self._context = None
self._pid = current_pid
class ForkAwareQueuingClient(QueuingClient, ForkDetector):
""" Queuing Client can detect forking of process.
In this case the context and socket are restarted.
"""
def __init__(self, url='tcp://127.0.0.1:22334'):
super(ForkAwareQueuingClient, self).__init__(url)
self._pid = None
def __getstate__(self):
result_dict = super(ForkAwareQueuingClient, self).__getstate__()
result_dict['_pid'] = None
return result_dict
def start(self, test_connection=True):
self._detect_fork()
super(ForkAwareQueuingClient, self).start(test_connection)
class ForkAwareLockerClient(LockerClient, ForkDetector):
"""Locker Client that can detect forking of processes.
In this case the context and socket are restarted.
"""
def __init__(self, url='tcp://127.0.0.1:7777', lock_name=LockerServer.DEFAULT_LOCK):
super(ForkAwareLockerClient, self).__init__(url, lock_name)
self._pid = None
def __getstate__(self):
result_dict = super(ForkAwareLockerClient, self).__getstate__()
result_dict['_pid'] = None
return result_dict
def start(self, test_connection=True):
"""Checks for forking and starts/restarts if desired"""
self._detect_fork()
super(ForkAwareLockerClient, self).start(test_connection)
class QueueStorageServiceSender(MultiprocWrapper, HasLogger):
""" For multiprocessing with :const:`~pypet.pypetconstants.WRAP_MODE_QUEUE`, replaces the
original storage service.
    All storage requests are sent over a queue to the process running the
    :class:`~pypet.storageservice.QueueStorageServiceWriter`.
Does not support loading of data!
"""
def __init__(self, storage_queue=None):
self.queue = storage_queue
self.pickle_queue = True
self._set_logger()
def __getstate__(self):
result = super(QueueStorageServiceSender, self).__getstate__()
if not self.pickle_queue:
result['queue'] = None
return result
def load(self, *args, **kwargs):
raise NotImplementedError('Queue wrapping does not support loading. If you want to '
'load data in a multiprocessing environment, use a Lock '
'wrapping.')
@retry(9, Exception, 0.01, 'pypet.retry')
def _put_on_queue(self, to_put):
"""Puts data on queue"""
old = self.pickle_queue
self.pickle_queue = False
try:
self.queue.put(to_put, block=True)
finally:
self.pickle_queue = old
def store(self, *args, **kwargs):
"""Puts data to store on queue.
Note that the queue will no longer be pickled if the Sender is pickled.
"""
self._put_on_queue(('STORE', args, kwargs))
def send_done(self):
"""Signals the writer that it can stop listening to the queue"""
self._put_on_queue(('DONE', [], {}))
class LockAcquisition(HasLogger):
"""Abstract class to allow lock acquisition and release.
Assumes that implementing classes have a ``lock``, ``is_locked`` and
``is_open`` attribute.
Requires a ``_logger`` for error messaging.
"""
@retry(9, TypeError, 0.01, 'pypet.retry')
def acquire_lock(self):
if not self.is_locked:
self.is_locked = self.lock.acquire()
@retry(9, TypeError, 0.01, 'pypet.retry')
def release_lock(self):
if self.is_locked and not self.is_open:
try:
self.lock.release()
except (ValueError, ThreadError):
self._logger.exception('Could not release lock, '
'probably has been released already!')
self.is_locked = False
class PipeStorageServiceSender(MultiprocWrapper, LockAcquisition):
def __init__(self, storage_connection=None, lock=None):
self.conn = storage_connection
self.lock = lock
self.is_locked = False
self._set_logger()
def __getstate__(self):
# result = super(PipeStorageServiceSender, self).__getstate__()
result = self.__dict__.copy()
result['conn'] = None
result['lock'] = None
return result
def load(self, *args, **kwargs):
raise NotImplementedError('Pipe wrapping does not support loading. If you want to '
'load data in a multiprocessing environment, use the Lock '
'wrapping.')
@retry(9, Exception, 0.01, 'pypet.retry')
def _put_on_pipe(self, to_put):
"""Puts data on queue"""
self.acquire_lock()
self._send_chunks(to_put)
self.release_lock()
def _make_chunk_iterator(self, to_chunk, chunksize):
return (to_chunk[i:i + chunksize] for i in range(0, len(to_chunk), chunksize))
def _send_chunks(self, to_put):
put_dump = pickle.dumps(to_put)
data_size = sys.getsizeof(put_dump)
nchunks = data_size / 20000000. # chunks with size 20 MB
chunksize = int(len(put_dump) / nchunks)
chunk_iterator = self._make_chunk_iterator(put_dump, chunksize)
for chunk in chunk_iterator:
# print('S: sending False')
self.conn.send(False)
# print('S: sent False')
# print('S: sending chunk')
self.conn.send_bytes(chunk)
# print('S: sent chunk %s' % chunk[0:10])
# print('S: recv signal')
self.conn.recv() # wait for signal that message was received
# print('S: read signal')
# print('S: sending True')
self.conn.send(True)
# print('S: sent True')
# print('S: recving last signal')
self.conn.recv() # wait for signal that message was received
# print('S: read last signal')
# print('S; DONE SENDING data')
def store(self, *args, **kwargs):
"""Puts data to store on queue.
Note that the queue will no longer be pickled if the Sender is pickled.
"""
self._put_on_pipe(('STORE', args, kwargs))
def send_done(self):
"""Signals the writer that it can stop listening to the queue"""
self._put_on_pipe(('DONE', [], {}))
class StorageServiceDataHandler(HasLogger):
"""Class that can store data via a storage service, needs to be sub-classed to receive data"""
def __init__(self, storage_service, gc_interval=None):
self._storage_service = storage_service
self._trajectory_name = ''
self.gc_interval = gc_interval
self.operation_counter = 0
self._set_logger()
def __repr__(self):
return '<%s wrapping Storage Service %s>' % (self.__class__.__name__,
repr(self._storage_service))
def _open_file(self):
self._storage_service.store(pypetconstants.OPEN_FILE, None,
trajectory_name=self._trajectory_name)
self._logger.info('Opened the hdf5 file.')
def _close_file(self):
self._storage_service.store(pypetconstants.CLOSE_FILE, None)
self._logger.info('Closed the hdf5 file.')
def _check_and_collect_garbage(self):
if self.gc_interval and self.operation_counter % self.gc_interval == 0:
collected = gc.collect()
self._logger.debug('Garbage Collection: Found %d unreachable items.' % collected)
self.operation_counter += 1
def _handle_data(self, msg, args, kwargs):
"""Handles data and returns `True` or `False` if everything is done."""
stop = False
try:
if msg == 'DONE':
stop = True
elif msg == 'STORE':
if 'msg' in kwargs:
store_msg = kwargs.pop('msg')
else:
store_msg = args[0]
args = args[1:]
if 'stuff_to_store' in kwargs:
stuff_to_store = kwargs.pop('stuff_to_store')
else:
stuff_to_store = args[0]
args = args[1:]
trajectory_name = kwargs['trajectory_name']
if self._trajectory_name != trajectory_name:
if self._storage_service.is_open:
self._close_file()
self._trajectory_name = trajectory_name
self._open_file()
self._storage_service.store(store_msg, stuff_to_store, *args, **kwargs)
self._storage_service.store(pypetconstants.FLUSH, None)
self._check_and_collect_garbage()
else:
raise RuntimeError('You queued something that was not '
'intended to be queued. I did not understand message '
'`%s`.' % msg)
except Exception:
self._logger.exception('ERROR occurred during storing!')
time.sleep(0.01)
pass # We don't want to kill the queue process in case of an error
return stop
def run(self):
"""Starts listening to the queue."""
try:
while True:
msg, args, kwargs = self._receive_data()
stop = self._handle_data(msg, args, kwargs)
if stop:
break
finally:
if self._storage_service.is_open:
self._close_file()
self._trajectory_name = ''
def _receive_data(self):
raise NotImplementedError('Implement this!')
class QueueStorageServiceWriter(StorageServiceDataHandler):
"""Wrapper class that listens to the queue and stores queue items via the storage service."""
def __init__(self, storage_service, storage_queue, gc_interval=None):
super(QueueStorageServiceWriter, self).__init__(storage_service, gc_interval=gc_interval)
self.queue = storage_queue
@retry(9, Exception, 0.01, 'pypet.retry')
def _receive_data(self):
"""Gets data from queue"""
result = self.queue.get(block=True)
if hasattr(self.queue, 'task_done'):
self.queue.task_done()
return result
class PipeStorageServiceWriter(StorageServiceDataHandler):
"""Wrapper class that listens to the queue and stores queue items via the storage service."""
def __init__(self, storage_service, storage_connection, max_buffer_size=10, gc_interval=None):
super(PipeStorageServiceWriter, self).__init__(storage_service, gc_interval=gc_interval)
self.conn = storage_connection
if max_buffer_size == 0:
# no maximum buffer size
max_buffer_size = float('inf')
self.max_size = max_buffer_size
self._buffer = deque()
self._set_logger()
def _read_chunks(self):
chunks = []
stop = False
while not stop:
# print('W: recving stop')
stop = self.conn.recv()
# print('W: read stop = %s' % str(stop))
if not stop:
# print('W: recving chunk')
chunk = self.conn.recv_bytes()
chunks.append(chunk)
# print('W: read chunk')
# print('W: sending True')
self.conn.send(True)
# print('W: sent True')
# print('W: reconstructing data')
to_load = b''.join(chunks)
del chunks # free unnecessary memory
try:
data = pickle.loads(to_load)
except Exception:
# We don't want to crash the storage service if reconstruction
# due to errors fails
self._logger.exception('Could not reconstruct pickled data.')
data = None
return data
@retry(9, Exception, 0.01, 'pypet.retry')
def _receive_data(self):
"""Gets data from pipe"""
while True:
while len(self._buffer) < self.max_size and self.conn.poll():
data = self._read_chunks()
if data is not None:
self._buffer.append(data)
if len(self._buffer) > 0:
return self._buffer.popleft()
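# Minimal wiring sketch (not part of pypet's public API; `storage_service` is
# assumed to provide store()/is_open): the sender and writer above share the two
# ends of a duplex multiprocessing Pipe, and the sender side is guarded by a lock
# so that several processes can share one connection.
def _example_pipe_wrapping(storage_service):
    """Sketch only: create a paired pipe sender/writer."""
    import multiprocessing as mp
    writer_conn, sender_conn = mp.Pipe(duplex=True)
    sender = PipeStorageServiceSender(sender_conn, lock=mp.Lock())
    writer = PipeStorageServiceWriter(storage_service, writer_conn)
    return sender, writer  # call sender.store(...) in workers, writer.run() in the consumer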
class LockWrapper(MultiprocWrapper, LockAcquisition):
"""For multiprocessing in :const:`~pypet.pypetconstants.WRAP_MODE_LOCK` mode,
augments a storage service with a lock.
The lock is acquired before storage or loading and released afterwards.
"""
def __init__(self, storage_service, lock=None):
self._storage_service = storage_service
self.lock = lock
self.is_locked = False
self.pickle_lock = True
self._set_logger()
def __getstate__(self):
result = super(LockWrapper, self).__getstate__()
if not self.pickle_lock:
result['lock'] = None
return result
def __repr__(self):
return '<%s wrapping Storage Service %s>' % (self.__class__.__name__,
repr(self._storage_service))
@property
def is_open(self):
""" Normally the file is opened and closed after each insertion.
However, the storage service may provide the option to keep the store open and signals
this via this property.
"""
return self._storage_service.is_open
@property
def multiproc_safe(self):
"""Usually storage services are not supposed to be multiprocessing safe"""
return True
def store(self, *args, **kwargs):
"""Acquires a lock before storage and releases it afterwards."""
try:
self.acquire_lock()
return self._storage_service.store(*args, **kwargs)
finally:
if self.lock is not None:
try:
self.release_lock()
except RuntimeError:
self._logger.error('Could not release lock `%s`!' % str(self.lock))
def __del__(self):
# In order to prevent a dead-lock in case of error,
# we release the lock once again
self.release_lock()
def load(self, *args, **kwargs):
"""Acquires a lock before loading and releases it afterwards."""
try:
self.acquire_lock()
return self._storage_service.load(*args, **kwargs)
finally:
if self.lock is not None:
try:
self.release_lock()
except RuntimeError:
self._logger.error('Could not release lock `%s`!' % str(self.lock))
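# Minimal usage sketch (hypothetical `my_service` and arguments): any storage
# service can be wrapped so that concurrent store/load calls from several
# processes are serialised by a shared lock, e.g.
#
#     import multiprocessing as mp
#     wrapped = LockWrapper(my_service, lock=mp.Manager().Lock())
#     wrapped.store('some_msg', data, trajectory_name='my_traj')
#     wrapped.load('some_msg', stuff_to_load, trajectory_name='my_traj')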
class ReferenceWrapper(MultiprocWrapper):
"""Wrapper that just keeps references to data to be stored."""
def __init__(self):
self.references = {}
def store(self, msg, stuff_to_store, *args, **kwargs):
"""Simply keeps a reference to the stored data """
trajectory_name = kwargs['trajectory_name']
if trajectory_name not in self.references:
self.references[trajectory_name] = []
self.references[trajectory_name].append((msg, cp.copy(stuff_to_store), args, kwargs))
def load(self, *args, **kwargs):
"""Not implemented"""
raise NotImplementedError('Reference wrapping does not support loading. If you want to '
'load data in a multiprocessing environment, use a Lock '
'wrapping.')
def free_references(self):
self.references = {}
class ReferenceStore(HasLogger):
"""Class that can store references"""
def __init__(self, storage_service, gc_interval=None):
self._storage_service = storage_service
self.gc_interval = gc_interval
self.operation_counter = 0
self._set_logger()
def _check_and_collect_garbage(self):
if self.gc_interval and self.operation_counter % self.gc_interval == 0:
collected = gc.collect()
self._logger.debug('Garbage Collection: Found %d unreachable items.' % collected)
self.operation_counter += 1
def store_references(self, references):
"""Stores references to disk and may collect garbage."""
for trajectory_name in references:
self._storage_service.store(pypetconstants.LIST, references[trajectory_name], trajectory_name=trajectory_name)
self._check_and_collect_garbage()
|
|
#!/usr/bin/env python3
import functools
import re
import requests
from . import error, types, utils
class BaseAPIHandler(object):
"""Base API handler
Example:
>>> h = haiker.api.BaseAPIHandler(
... haiker.OAuth('MyConsumerKey', 'MyConsumerSecret',
... 'MyAccessToken', 'MyAccessTokenSecret'),
... 'http://h.hatena.ne.jp/api',
... haiker.utils.user_agent()
... )
>>> statuses = h.get('/statuses/public_timeline.json', {'count': 3})
>>> len(statuses)
3
>>> statuses[0]['created_at']
'2010-01-02T03:04:05Z'
>>> statuses[0]['user']['url']
'http://h.hatena.ne.jp/xxxxxx/'
"""
def __init__(self, auth, root, user_agent):
super().__init__()
self.auth = auth
self.root = root
self.user_agent = user_agent
def _request(self, method, path, params=None, data=None, files=None):
if re.search('[^a-zA-Z0-9./\\-_]|\\.\\.|//', path) is not None:
raise ValueError('suspicious path: {0!r}'.format(path))
url = self.root.rstrip('/') + '/' + path.lstrip('/')
headers = {'User-Agent': self.user_agent}
res = method(url, headers=headers, auth=self.auth,
params=utils.build_params(params),
data=utils.build_params(data),
files=files)
res.raise_for_status()
return res.json()
def get(self, path, params=None):
return self._request(requests.get, path, params=params)
def post(self, path, params=None, data=None, files=None):
return self._request(requests.post, path,
params=params, data=data, files=files)
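# For reference (illustrative, not part of the original API): the check in
# _request rejects any path containing characters outside [a-zA-Z0-9./-_],
# a parent reference ('..') or an empty segment ('//'):
#
#     '/statuses/show/123.json'    -> accepted
#     '../admin'  or  '//host/x'   -> ValueError('suspicious path: ...')
#     '/statuses?page=1'           -> ValueError (query strings go via `params`)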
class Haiker(object):
"""Handler for the Hatena Haiku RESTful API
Example:
>>> import haiker
>>> auth = haiker.OAuth('MyConsumerKey', 'MyConsumerSecret',
... 'MyAccessToken', 'MyAccessTokenSecret')
>>> api = haiker.Haiker(auth)
>>> statuses = api.public_timeline(count=3, body_formats=['haiku'])
>>> len(statuses)
3
>>> status = statuses[0]
>>> status.created_at
datetime.datetime(2010, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)
>>> status.user.url
'http://h.hatena.ne.jp/xxxxxx/'
"""
@error.HaikerError.replace
def __init__(self, auth=None, *,
user_agent=utils.user_agent(),
root='http://h.hatena.ne.jp/api'):
"""auth is used when calling API. It is required to be
None, a haiker.BasicAuth object or a haiker.OAuth object.
"""
super().__init__()
self._handler = BaseAPIHandler(auth, root, user_agent)
@property
@error.HaikerError.replace
def auth(self):
return self._handler.auth
@auth.setter
@error.HaikerError.replace
def auth(self, value):
self._handler.auth = value
# Timeline APIs
@error.HaikerError.replace
def public_timeline(self, *, body_formats=None, count=None, page=None,
since=None):
"""statuses/public_timeline"""
params = utils.removed_dict(locals(), {'self'})
path = '/statuses/public_timeline.json'
res = self._handler.get(path, params)
return types.list_of(types.Status)(res)
@error.HaikerError.replace
def keyword_timeline(self, word, *, count=None, page=None, since=None,
body_formats=None, sort=None):
"""statuses/keyword_timeline"""
params = utils.removed_dict(locals(), {'self'})
path = '/statuses/keyword_timeline.json'
res = self._handler.get(path, params)
return types.list_of(types.Status)(res)
@error.HaikerError.replace
def user_timeline(self, url_name=None, *, body_formats=None, count=None,
page=None, since=None, media=None, sort=None):
"""statuses/user_timeline"""
params = utils.removed_dict(locals(), {'self', 'url_name'})
if url_name is None:
path = '/statuses/user_timeline.json'
else:
path = '/statuses/user_timeline/{0}.json'.format(url_name)
res = self._handler.get(path, params)
return types.list_of(types.Status)(res)
@error.HaikerError.replace
def friends_timeline(self, url_name=None, *, count=None, page=None,
since=None, body_formats=None):
"""statuses/friends_timeline"""
params = utils.removed_dict(locals(), {'self', 'url_name'})
if url_name is None:
path = '/statuses/friends_timeline.json'
else:
path = '/statuses/friends_timeline/{0}.json'.format(url_name)
res = self._handler.get(path, params)
return types.list_of(types.Status)(res)
@error.HaikerError.replace
def album(self, *, body_formats=None, count=None, page=None,
since=None, sort=None, word=None):
"""statuses/album"""
params = utils.removed_dict(locals(), {'self'})
path = '/statuses/album.json'
res = self._handler.get(path, params)
return types.list_of(types.Status)(res)
# Entry and star APIs
@error.HaikerError.replace
def update_status(self, keyword, status, *, in_reply_to_status_id=None,
source=None, files=None, body_formats=None):
"""statuses/update"""
data = utils.removed_dict(locals(), {'self', 'files'})
if files is not None:
files = [('file', f) for f in files]
path = '/statuses/update.json'
res = self._handler.post(path, data=data, files=files)
return types.Status(res)
@error.HaikerError.replace
def show_status(self, eid, *, body_formats=None):
"""statuses/show"""
params = utils.removed_dict(locals(), {'self', 'eid'})
path = '/statuses/show/{0}.json'.format(eid)
res = self._handler.get(path, params)
return types.Status(res)
@error.HaikerError.replace
def delete_status(self, eid, author_url_name, *, body_formats=None):
"""statuses/destroy"""
params = utils.removed_dict(locals(), {'self', 'eid'})
path = '/statuses/destroy/{0}.json'.format(eid)
res = self._handler.post(path, params)
return types.Status(res)
@error.HaikerError.replace
def add_star(self, eid, *, body_formats=None):
"""favorites/create"""
params = utils.removed_dict(locals(), {'self', 'eid'})
path = '/favorites/create/{0}.json'.format(eid)
res = self._handler.post(path, params)
return types.Status(res)
@error.HaikerError.replace
def remove_star(self, eid, *, body_formats=None):
"""favorites/destroy"""
params = utils.removed_dict(locals(), {'self', 'eid'})
path = '/favorites/destroy/{0}.json'.format(eid)
res = self._handler.post(path, params)
return types.Status(res)
# User and keyword APIs
@error.HaikerError.replace
def show_user(self, url_name=None):
"""friendships/show"""
if url_name is None:
path = '/friendships/show.json'
else:
path = '/friendships/show/{0}.json'.format(url_name)
res = self._handler.get(path)
return types.User(res)
@error.HaikerError.replace
def show_keyword(self, word, *, without_related_keywords=None):
"""keywords/show"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/show.json'
res = self._handler.get(path, params)
return types.Keyword(res)
@error.HaikerError.replace
def hot_keywords(self, *, without_related_keywords=None):
"""keywords/hot"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/hot.json'
res = self._handler.get(path, params)
return types.list_of(types.Keyword)(res)
@error.HaikerError.replace
def keyword_list(self, *, page=None, without_related_keywords=None,
word=None):
"""keywords/list"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/list.json'
res = self._handler.get(path, params)
return types.list_of(types.Keyword)(res)
@error.HaikerError.replace
def associate_keywords(self, word1, word2, *,
without_related_keywords=None):
"""keywords/relation/create"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/relation/create.json'
res = self._handler.post(path, params)
return types.Keyword(res)
@error.HaikerError.replace
def dissociate_keywords(self, word1, word2, *,
without_related_keywords=None):
"""keywords/relation/destroy"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/relation/destroy.json'
res = self._handler.post(path, params)
return types.Keyword(res)
# Favorite APIs
@error.HaikerError.replace
def friends(self, url_name=None, *, page=None):
"""statuses/friends"""
params = utils.removed_dict(locals(), {'self', 'url_name'})
if url_name is None:
path = '/statuses/friends.json'
else:
path = '/statuses/friends/{0}.json'.format(url_name)
res = self._handler.get(path, params)
        return types.list_of(types.User)(res)
@error.HaikerError.replace
def followers(self, url_name=None, *, page=None):
"""statuses/followers"""
params = utils.removed_dict(locals(), {'self', 'url_name'})
if url_name is None:
path = '/statuses/followers.json'
else:
path = '/statuses/followers/{0}.json'.format(url_name)
res = self._handler.get(path, params)
        return types.list_of(types.User)(res)
@error.HaikerError.replace
def follow_user(self, url_name):
"""friendships/create"""
path = '/friendships/create/{0}.json'.format(url_name)
res = self._handler.post(path)
return types.User(res)
@error.HaikerError.replace
def unfollow_user(self, url_name):
"""friendships/destroy"""
path = '/friendships/destroy/{0}.json'.format(url_name)
res = self._handler.post(path)
return types.User(res)
@error.HaikerError.replace
def favorite_keywords(self, url_name=None, *, page=None,
without_related_keywords=None):
"""statuses/keywords"""
params = utils.removed_dict(locals(), {'self', 'url_name'})
if url_name is None:
path = '/statuses/keywords.json'
else:
path = '/statuses/keywords/{0}.json'.format(url_name)
res = self._handler.get(path, params)
        return types.list_of(types.Keyword)(res)
@error.HaikerError.replace
def follow_keyword(self, word, *, without_related_keywords=None):
"""keywords/create"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/create.json'
res = self._handler.post(path, params)
return types.Keyword(res)
@error.HaikerError.replace
def unfollow_keyword(self, word, *, without_related_keywords=None):
"""keywords/destroy"""
params = utils.removed_dict(locals(), {'self'})
path = '/keywords/destroy.json'
res = self._handler.post(path, params)
return types.Keyword(res)
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import cast, List, TYPE_CHECKING, Union
import time
from azure.core.credentials import AzureKeyCredential
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.exceptions import ServiceResponseTimeoutError
from ._timer import Timer
from ._utils_async import get_async_authentication_policy
from .._utils import is_retryable_status_code
from .._search_indexing_buffered_sender_base import SearchIndexingBufferedSenderBase
from .._generated.aio import SearchClient as SearchIndexClient
from .._generated.models import IndexingResult
from .._search_documents_error import RequestEntityTooLargeError
from ._index_documents_batch_async import IndexDocumentsBatch
from .._headers_mixin import HeadersMixin
from .._version import SDK_MONIKER
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials_async import AsyncTokenCredential
class SearchIndexingBufferedSender(SearchIndexingBufferedSenderBase, HeadersMixin):
"""A buffered sender for document indexing actions.
:param endpoint: The URL endpoint of an Azure search service
:type endpoint: str
:param index_name: The name of the index to connect to
:type index_name: str
:param credential: A credential to authorize search client requests
:type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential
    :keyword int auto_flush_interval: the maximum number of seconds between two flushes. This only
        takes effect when auto_flush is on. Defaults to 60 seconds.
:keyword int initial_batch_action_count: The initial number of actions to group into a batch when
tuning the behavior of the sender. The default value is 512.
:keyword int max_retries_per_action: The number of times to retry a failed document. The default value is 3.
    :keyword callable on_new: If set, the client calls this callback when a new
        IndexAction is added to the queue.
    :keyword callable on_progress: If set, the client calls this callback when an
        IndexAction succeeds.
    :keyword callable on_error: If set, the client calls this callback when an
        IndexAction fails.
    :keyword callable on_remove: If set, the client calls this callback when an
        IndexAction is removed from the queue (whether it succeeded or failed).
:keyword str api_version: The Search API version to use for requests.
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
endpoint: str,
index_name: str,
credential: Union[AzureKeyCredential, "AsyncTokenCredential"],
**kwargs
) -> None:
super(SearchIndexingBufferedSender, self).__init__(
endpoint=endpoint, index_name=index_name, credential=credential, **kwargs
)
self._index_documents_batch = IndexDocumentsBatch()
if isinstance(credential, AzureKeyCredential):
self._aad = False
self._client = SearchIndexClient(
endpoint=endpoint,
index_name=index_name,
sdk_moniker=SDK_MONIKER,
api_version=self._api_version,
**kwargs
) # type: SearchIndexClient
else:
self._aad = True
authentication_policy = get_async_authentication_policy(credential)
self._client = SearchIndexClient(
endpoint=endpoint,
index_name=index_name,
authentication_policy=authentication_policy,
sdk_moniker=SDK_MONIKER,
api_version=self._api_version,
**kwargs
) # type: SearchIndexClient
self._reset_timer()
async def _cleanup(self, flush=True):
        # type: (bool) -> None
        """Clean up the client.
        :param bool flush: flush the actions queue before shutting down the client.
            Defaults to True.
"""
if flush:
await self.flush()
if self._auto_flush:
self._timer.cancel()
def __repr__(self):
# type: () -> str
return "<SearchIndexingBufferedSender [endpoint={}, index={}]>".format(
repr(self._endpoint), repr(self._index_name)
)[:1024]
@property
def actions(self):
# type: () -> List[IndexAction]
"""The list of currently index actions in queue to index.
:rtype: List[IndexAction]
"""
return self._index_documents_batch.actions
@distributed_trace_async
async def close(self, **kwargs): # pylint: disable=unused-argument
# type: () -> None
"""Close the :class:`~azure.search.documents.aio.SearchClient` session."""
await self._cleanup(flush=True)
return await self._client.close()
@distributed_trace_async
async def flush(self, timeout=86400, **kwargs): # pylint:disable=unused-argument
        # type: (int) -> bool
        """Flush the batch.
        :param int timeout: timeout in seconds. Defaults to 86400 (one day).
        :return: True if any action failed; False otherwise.
:rtype: bool
:raises ~azure.core.exceptions.ServiceResponseTimeoutError:
"""
has_error = False
begin_time = int(time.time())
while len(self.actions) > 0:
now = int(time.time())
remaining = timeout - (now - begin_time)
if remaining < 0:
if self._on_error:
actions = await self._index_documents_batch.dequeue_actions()
for action in actions:
await self._on_error(action)
raise ServiceResponseTimeoutError("Service response time out")
result = await self._process(timeout=remaining, raise_error=False)
if result:
has_error = True
return has_error
async def _process(self, timeout=86400, **kwargs):
# type: (int) -> bool
from ..indexes.aio import SearchIndexClient as SearchServiceClient
raise_error = kwargs.pop("raise_error", True)
actions = await self._index_documents_batch.dequeue_actions()
has_error = False
if not self._index_key:
try:
client = SearchServiceClient(self._endpoint, self._credential)
result = await client.get_index(self._index_name)
if result:
for field in result.fields:
if field.key:
self._index_key = field.name
break
except Exception: # pylint: disable=broad-except
pass
self._reset_timer()
try:
results = await self._index_documents_actions(
actions=actions, timeout=timeout
)
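            # Match each IndexingResult back to its originating action by comparing
            # the result key with the action's value for the index key field.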
for result in results:
try:
action = next(
x
for x in actions
if x.additional_properties.get(self._index_key) == result.key
)
if result.succeeded:
await self._callback_succeed(action)
elif is_retryable_status_code(result.status_code):
await self._retry_action(action)
has_error = True
else:
await self._callback_fail(action)
has_error = True
except StopIteration:
pass
return has_error
except Exception: # pylint: disable=broad-except
for action in actions:
await self._retry_action(action)
if raise_error:
raise
return True
async def _process_if_needed(self):
        # type: () -> None
        """Triggered every time a new action is queued.
        It checks the actions already queued and flushes them if:
        1. auto_flush is on, and
        2. at least self._batch_action_count actions are queued.
        """
if not self._auto_flush:
return
if len(self._index_documents_batch.actions) < self._batch_action_count:
return
await self._process(raise_error=False)
def _reset_timer(self):
# pylint: disable=access-member-before-definition
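        # Cancel any pending auto-flush timer and, when auto-flush is enabled,
        # schedule a new one that calls _process after _auto_flush_interval seconds.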
try:
self._timer.cancel()
except AttributeError:
pass
if self._auto_flush:
self._timer = Timer(self._auto_flush_interval, self._process)
@distributed_trace_async
async def upload_documents(
self, documents, **kwargs
): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue upload documents actions.
:param documents: A list of documents to upload.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_upload_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def delete_documents(
self, documents, **kwargs
): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue delete documents actions
:param documents: A list of documents to delete.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_delete_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def merge_documents(
self, documents, **kwargs
): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue merge documents actions
:param documents: A list of documents to merge.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_merge_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def merge_or_upload_documents(
self, documents, **kwargs
): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue merge documents or upload documents actions
:param documents: A list of documents to merge or upload.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_merge_or_upload_actions(
documents
)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def index_documents(self, batch, **kwargs):
# type: (IndexDocumentsBatch, **Any) -> List[IndexingResult]
"""Specify a document operations to perform as a batch.
:param batch: A batch of document operations to perform.
:type batch: IndexDocumentsBatch
:rtype: List[IndexingResult]
        :raises ~azure.search.documents.RequestEntityTooLargeError:
"""
return await self._index_documents_actions(actions=batch.actions, **kwargs)
async def _index_documents_actions(self, actions, **kwargs):
# type: (List[IndexAction], **Any) -> List[IndexingResult]
error_map = {413: RequestEntityTooLargeError}
timeout = kwargs.pop("timeout", 86400)
begin_time = int(time.time())
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
try:
batch_response = await self._client.documents.index(
actions=actions, error_map=error_map, **kwargs
)
return cast(List[IndexingResult], batch_response.results)
except RequestEntityTooLargeError:
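            # A 413 means the whole request was too large. If a single action is
            # already too large, re-raise; otherwise split the batch in half, retry
            # each half recursively, and shrink the preferred batch size.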
if len(actions) == 1:
raise
            pos = round(len(actions) / 2)
            if pos < self._batch_action_count:
                # shrink the preferred batch size so later batches stay under the limit
                self._batch_action_count = pos
now = int(time.time())
remaining = timeout - (now - begin_time)
if remaining < 0:
raise ServiceResponseTimeoutError("Service response time out")
batch_response_first_half = await self._index_documents_actions(
actions=actions[:pos], error_map=error_map, **kwargs
)
            if len(batch_response_first_half) > 0:
                # the recursive call already returns a List[IndexingResult]
                result_first_half = batch_response_first_half
            else:
                result_first_half = []
now = int(time.time())
remaining = timeout - (now - begin_time)
if remaining < 0:
raise ServiceResponseTimeoutError("Service response time out")
batch_response_second_half = await self._index_documents_actions(
actions=actions[pos:], error_map=error_map, **kwargs
)
            if len(batch_response_second_half) > 0:
                # the recursive call already returns a List[IndexingResult]
                result_second_half = batch_response_second_half
            else:
                result_second_half = []
            # list.extend() returns None; concatenate the two halves instead
            return result_first_half + result_second_half
async def __aenter__(self):
# type: () -> SearchIndexingBufferedSender
await self._client.__aenter__() # pylint: disable=no-member
return self
async def __aexit__(self, *args):
# type: (*Any) -> None
await self.close()
await self._client.__aexit__(*args) # pylint: disable=no-member
async def _retry_action(self, action):
# type: (IndexAction) -> None
if not self._index_key:
await self._callback_fail(action)
return
key = action.additional_properties.get(self._index_key)
counter = self._retry_counter.get(key)
if not counter:
# first time that fails
self._retry_counter[key] = 1
await self._index_documents_batch.enqueue_actions(action)
elif counter < self._max_retries_per_action - 1:
# not reach retry limit yet
self._retry_counter[key] = counter + 1
await self._index_documents_batch.enqueue_actions(action)
else:
await self._callback_fail(action)
async def _callback_succeed(self, action):
# type: (IndexAction) -> None
if self._on_remove:
await self._on_remove(action)
if self._on_progress:
await self._on_progress(action)
async def _callback_fail(self, action):
# type: (IndexAction) -> None
if self._on_remove:
await self._on_remove(action)
if self._on_error:
await self._on_error(action)
async def _callback_new(self, actions):
# type: (List[IndexAction]) -> None
if self._on_new:
for action in actions:
await self._on_new(action)
|
|
import datetime
import logging
from collections import OrderedDict
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import Index, UniqueConstraint
from sqlalchemy.dialects.postgresql import (ARRAY, JSON)
from sqlalchemy import (Column, Integer, Text, ForeignKey, ForeignKeyConstraint,
DateTime, Boolean, func, CheckConstraint)
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.sql.expression import text
from sqlalchemy.orm import relationship, backref, validates
from croniter import croniter
import utc
import mettle_protocol as mp
Base = declarative_base()
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Service(Base):
__tablename__ = 'services'
id = Column(Integer, primary_key=True)
name = Column(Text, nullable=False)
description = Column(Text)
updated_by = Column(Text, nullable=False)
# TODO: once all services have this field filled in, update this to
# nullable=False, and create a migration to match.
pipeline_names = Column(ARRAY(Text))
notifications = relationship("Notification", lazy='dynamic',
backref=backref('service'))
@validates('name')
def validate_name(self, key, name):
# Ensure that the name has no characters that have special meanings in
# RabbitMQ routing keys.
assert '.' not in name
assert '*' not in name
assert '#' not in name
return name
__table_args__ = (
UniqueConstraint('name'),
)
def as_dict(self):
return dict(id=self.id,
name=self.name,
description=self.description,
updated_by=self.updated_by,
pipeline_names=self.pipeline_names)
def __repr__(self):
return 'Service <%s>' % self.name
class NotificationList(Base):
__tablename__ = 'notification_lists'
id = Column(Integer, primary_key=True)
name = Column(Text, nullable=False)
recipients = Column(ARRAY(Text), nullable=False)
updated_by = Column(Text, nullable=False)
def __repr__(self):
return 'NotificationList <%s>' % self.name
class Pipeline(Base):
__tablename__ = 'pipelines'
DEFAULT_RETRIES = 3
id = Column(Integer, primary_key=True)
name = Column(Text, nullable=False)
service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
notification_list_id = Column(Integer, ForeignKey('notification_lists.id'),
nullable=False)
updated_by = Column(Text, nullable=False)
active = Column(Boolean, nullable=False, server_default=text('true'))
retries = Column(Integer, default=DEFAULT_RETRIES)
service = relationship("Service", backref=backref('pipelines', order_by=name))
notification_list = relationship('NotificationList',
backref=backref('pipelines', order_by=name))
# A pipeline must have either a crontab or a trigger pipeline, but not both.
crontab = Column(Text)
chained_from_id = Column(Integer, ForeignKey('pipelines.id'))
chained_from = relationship("Pipeline", remote_side=id, backref='chains_to')
runs = relationship("PipelineRun", lazy='dynamic',
backref=backref('pipelines'))
def next_run_time(self):
if self.chained_from:
return self.chained_from.next_run_time()
schedule = croniter(self.crontab, utc.now())
return schedule.get_next(datetime.datetime)
def last_run_time(self):
last_run = self.runs.order_by(PipelineRun.target_time.desc()).first()
if last_run:
return last_run.target_time
@validates('name')
def validate_name(self, key, name):
# Ensure that the name has no characters that have special meanings in
# RabbitMQ routing keys.
assert '.' not in name
assert '*' not in name
assert '#' not in name
return name
@validates('crontab')
def validate_crontab(self, key, cronspec):
if cronspec is not None:
# If the cronspec is not parseable, croniter will raise an exception
# here.
croniter(cronspec, utc.now())
return cronspec
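    # Illustrative note (not in the original source): next_run_time() above uses
    # the same croniter pattern, e.g.
    #   croniter('0 6 * * *', utc.now()).get_next(datetime.datetime)
    # yields the next 06:00 occurrence after the current UTC time.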
__table_args__ = (
UniqueConstraint('name', 'service_id'),
UniqueConstraint('id', 'service_id'), # needed for composite FK
CheckConstraint('crontab IS NOT NULL OR chained_from_id IS NOT NULL',
name='crontab_or_pipeline_check'),
CheckConstraint('NOT (crontab IS NOT NULL AND chained_from_id IS NOT NULL)',
name='crontab_and_pipeline_check'),
)
def __repr__(self):
return 'Pipeline <%s>' % self.name
def as_dict(self):
next_time = self.next_run_time()
last_time = self.last_run_time()
return OrderedDict(
id=self.id,
name=self.name,
service_id=self.service_id,
service_name=self.service.name, # can cause extra query!
notification_list_id=self.notification_list_id,
updated_by=self.updated_by,
active=self.active,
retries=self.retries,
crontab=self.crontab,
chained_from_id=self.chained_from_id,
next_run_time=next_time.isoformat() if next_time else None,
last_run_time=last_time.isoformat() if last_time else None,
)
class PipelineRun(Base):
__tablename__ = 'pipeline_runs'
id = Column(Integer, primary_key=True)
pipeline_id = Column(Integer, ForeignKey('pipelines.id'), nullable=False)
target_time = Column(DateTime(timezone=True), nullable=False)
created_time = Column(DateTime(timezone=True), nullable=False,
server_default=func.now())
succeeded = Column(Boolean, nullable=False, default=False,
server_default=text('false'))
started_by = Column(Text, nullable=False) # username or 'timer'
chained_from_id = Column(Integer, ForeignKey('pipeline_runs.id'))
# These fields are set when we get an ack from the ETL service
ack_time = Column(DateTime(timezone=True))
targets = Column(MutableDict.as_mutable(JSON), default={})
target_parameters = Column(MutableDict.as_mutable(JSON), default={})
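    # Judging from target_deps_met() and make_job() below: 'targets' maps each
    # target name to the list of targets it depends on, and 'target_parameters'
    # maps a target name to the parameters passed to its job.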
    # end_time is set by the dispatcher after it has heard back from all job runs
end_time = Column(DateTime(timezone=True))
pipeline = relationship("Pipeline", backref=backref('pipeline_runs',
order_by=created_time))
chained_from = relationship("PipelineRun", backref=backref('chains_to'),
remote_side=id)
def get_announce_time(self):
if self.nacks:
return max(n.reannounce_time for n in self.nacks)
return None
def is_ended(self, db):
if self.ack_time is None:
return False
return all(self.target_is_ended(db, t) for t in self.targets)
def is_failed(self, db):
return any(self.target_is_failed(db, t) for t in self.targets)
def all_targets_succeeded(self, db):
return all(self.target_is_succeeded(db, t) for t in self.targets)
def target_is_succeeded(self, db, target):
job = db.query(Job).filter(Job.pipeline_run==self,
Job.target==target,
Job.succeeded==True).first()
return job is not None
def target_is_failed(self, db, target):
failure_count = db.query(Job).filter(Job.pipeline_run==self,
Job.target==target,
Job.end_time!=None,
Job.succeeded==False).count()
return failure_count >= self.pipeline.retries
def target_is_ended(self, db, target):
return (self.target_is_succeeded(db, target) or
self.target_is_failed(db, target))
def target_is_in_progress(self, db, target):
job = db.query(Job).filter(Job.pipeline_run==self,
Job.target==target,
Job.end_time==None).first()
return job is not None
def target_deps_met(self, db, target):
for dep in self.targets[target]:
if not self.target_is_succeeded(db, dep):
return False
return True
def target_is_ready(self, db, target):
# Return true if the target meets these conditions:
# 1. Is not ended.
# 2. Does not already have an in-progress job in the DB.
# 3. If it has dependencies, there is a successful job in the DB whose
# target provides that dependency.
if self.target_is_ended(db, target):
return False
if self.target_is_in_progress(db, target):
return False
return self.target_deps_met(db, target)
def get_ready_targets(self, db):
return [t for t in self.targets if self.target_is_ready(db, t)]
def make_job(self, db, target):
target_params = (self.target_parameters.get(target) if
self.target_parameters else None)
job = Job(
pipeline_run=self,
target=target,
target_parameters=target_params,
)
db.add(job)
try:
db.commit()
except IntegrityError as e:
logger.error(str(e))
db.rollback()
return None
return job
@validates('targets')
def validate_targets(self, key, targets):
mp.validate_targets_graph(targets)
return targets
__table_args__ = (
Index('unique_run_in_progress', pipeline_id, target_time,
postgresql_where=end_time==None, unique=True),
# Prevent duplicate runs of the same pipeline chained from the same
# previous pipeline.
Index('unique_run_chained_from', pipeline_id, chained_from_id,
unique=True),
        # Can't have an ack time without targets, even if it's an empty list
CheckConstraint('NOT (ack_time IS NOT NULL AND targets IS NULL)',
name='run_ack_without_targets_check'),
        # Can't have an end time without an ack time
CheckConstraint('NOT (end_time IS NOT NULL AND ack_time IS NULL)',
name='run_end_without_ack_check'),
UniqueConstraint('id', 'pipeline_id'), # needed for composite FK
)
def as_dict(self):
return OrderedDict(
id=self.id,
pipeline_id=self.pipeline_id,
target_time=self.target_time.isoformat(),
created_time=self.created_time.isoformat(),
succeeded=self.succeeded,
started_by=self.started_by,
ack_time=self.ack_time.isoformat() if self.ack_time else None,
targets=self.targets,
end_time=self.end_time.isoformat() if self.end_time else None,
)
class PipelineRunNack(Base):
__tablename__ = 'pipeline_runs_nacks'
id = Column(Integer, primary_key=True)
pipeline_run_id = Column(Integer, ForeignKey('pipeline_runs.id'),
nullable=False)
created_time = Column(DateTime(timezone=True), nullable=False,
server_default=func.now())
message = Column(Text, nullable=False)
# If a nack message specifies that a run should not be reannounced, this
# column will be left null.
reannounce_time = Column(DateTime(timezone=True))
pipeline_run = relationship("PipelineRun", backref=backref('nacks',
order_by=created_time))
class Job(Base):
__tablename__ = 'jobs'
id = Column(Integer, primary_key=True)
# These fields are populated when the job record is first created.
pipeline_run_id = Column(Integer, ForeignKey('pipeline_runs.id'),
nullable=False)
target = Column(Text, nullable=False)
target_parameters = Column(MutableDict.as_mutable(JSON), default={})
succeeded = Column(Boolean, nullable=False, default=False,
server_default=text('false'))
created_time = Column(DateTime(timezone=True), nullable=False,
server_default=func.now())
# These fields are populated when we receive an ack for the job from the ETL
# service.
start_time = Column(DateTime(timezone=True))
assigned_worker = Column(Text)
expires = Column(DateTime(timezone=True))
# And end_time is populated either when we receive a job_end message from
# the ETL service, or the timer proc decides that the job has timed out.
end_time = Column(DateTime(timezone=True))
pipeline_run = relationship("PipelineRun", backref=backref('jobs',
order_by=created_time))
__table_args__ = (
# Only allow one non-ended job for a target at a time.
Index('unique_job_in_progress', pipeline_run_id, target,
postgresql_where=end_time==None, unique=True),
# Can't be succeeded unless you have an end time
CheckConstraint('NOT (succeeded AND end_time IS NULL)',
name='job_succeeded_without_end_check'),
# Can't have an end time without a start time.
CheckConstraint('NOT (end_time IS NOT NULL AND start_time IS NULL)',
name='job_end_without_start_check'),
# Can't have a start time without a hostname
CheckConstraint('NOT (start_time IS NOT NULL AND assigned_worker IS NULL)',
name='job_start_without_worker_check'),
        # Can't have a start time without an expires time
CheckConstraint('NOT (start_time IS NOT NULL AND expires IS NULL)',
name='job_start_without_expire_check'),
UniqueConstraint('id', 'pipeline_run_id'), # needed for composite FK
)
def as_dict(self):
stime = self.start_time.isoformat() if self.start_time else None
etime = self.end_time.isoformat() if self.end_time else None
expires = self.expires.isoformat() if self.expires else None
# A bit ugly. split these into separate DB columns?
host = None
pid = None
if self.assigned_worker:
host, pid = self.assigned_worker.split('_')[:2]
pid = int(pid)
return dict(
id=self.id,
pipeline_run_id=self.pipeline_run_id,
target=self.target,
succeeded=self.succeeded,
created_time=self.created_time.isoformat(),
start_time=stime,
end_time=etime,
assigned_worker=self.assigned_worker,
expires=expires,
host=host,
pid=pid,
)
def get_queue(self, service_name):
if self.target_parameters is None:
return mp.service_queue_name(service_name)
return self.target_parameters.get('queue',
mp.service_queue_name(service_name))
class JobLogLine(Base):
__tablename__ = 'job_log_lines'
id = Column(Integer, primary_key=True)
job_id = Column(Integer, ForeignKey('jobs.id'), nullable=False)
message = Column(Text, nullable=False)
line_num = Column(Integer, nullable=False)
received_time = Column(DateTime(timezone=True), nullable=False,
server_default=func.now())
job = relationship("Job", backref=backref('job_log_lines',
order_by=line_num))
__table_args__ = (
UniqueConstraint('job_id', 'line_num'),
)
def as_dict(self):
# This must match the structure of the messages coming over rabbitmq.
job = self.job
run = job.pipeline_run
pipeline = run.pipeline
service = pipeline.service
return {
'service': service.name,
'pipeline': pipeline.name,
'run_id': run.id,
'job_id': job.id,
'line_num': self.line_num,
'msg': self.message,
}
def __repr__(self):
return "%s - %s: %s" % (self.job_id, self.line_num, self.message)
# This table is populated by triggers on services, pipelines, and
# notification_lists
class ChangeRecord(Base):
__tablename__ = 'change_records'
id = Column(Integer, primary_key=True)
table = Column(Text, nullable=False)
row_id = Column(Integer, nullable=False)
time = Column(DateTime(timezone=True), nullable=False,
server_default=func.now())
operation = Column(Text, nullable=False)
who = Column(Text)
old = Column(JSON)
new = Column(JSON)
class Notification(Base):
__tablename__ = 'notifications'
id = Column(Integer, primary_key=True)
created_time = Column(DateTime(timezone=True), nullable=False,
server_default=func.now())
message = Column(Text, nullable=False)
acknowledged_by = Column(Text)
acknowledged_time = Column(DateTime(timezone=True))
# Notifications can be attached to services, pipelines, pipeline runs, or
# jobs. If you attach to one of the more specific things (e.g. Job), then
# you must also specify the intermediate things (pipeline run, pipeline). A
    # foreign key constraint will prevent nonsensical links.
# Requiring the population of the intermediate FKs will make check
# constraints and queries on notifications much simpler.
service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
pipeline_id = Column(Integer, ForeignKey('pipelines.id'))
pipeline_run_id = Column(Integer, ForeignKey('pipeline_runs.id'))
job_id = Column(Integer, ForeignKey('jobs.id'))
pipeline = relationship("Pipeline", backref=backref('notifications',
order_by=created_time,
lazy='dynamic'),
foreign_keys=[pipeline_id])
pipeline_run = relationship("PipelineRun", backref=backref('notifications',
order_by=created_time),
foreign_keys=[pipeline_run_id])
job = relationship("Job", backref=backref('notifications',
order_by=created_time),
foreign_keys=[job_id])
def as_dict(self):
return {
'id': self.id,
'created_time': self.created_time.isoformat(),
'message': self.message,
'acknowledged_by': self.acknowledged_by,
'acknowledged_time': (self.acknowledged_time.isoformat() if
self.acknowledged_time else None),
'service_id': self.service_id,
'service_name': self.service.name if self.service else None,
'pipeline_id': self.pipeline_id,
'pipeline_name': self.pipeline.name if self.pipeline else None,
'pipeline_run_id': self.pipeline_run_id,
'job_id': self.job_id,
'target': self.job.target if self.job else None,
}
__table_args__ = (
# acknowledgement fields must be filled together.
CheckConstraint(
'NOT (acknowledged_by IS NOT NULL AND acknowledged_time IS NULL)',
name='notification_ack_time_check'
),
CheckConstraint(
'NOT (acknowledged_time IS NOT NULL AND acknowledged_by IS NULL)',
name='notification_ack_by_check'
),
# Must provide pipeline run if you provide job.
CheckConstraint('NOT (job_id IS NOT NULL AND pipeline_run_id IS NULL)',
name='notification_job_check'),
# Must provide pipeline if you provide pipeline run.
CheckConstraint('NOT (pipeline_run_id IS NOT NULL AND pipeline_id IS NULL)',
name='notification_run_check'),
# Don't need a check constraint on pipeline+service, since service is
# already NOT NULL
# Prevent service/pipeline mismatches
ForeignKeyConstraint(['service_id', 'pipeline_id'],
['pipelines.service_id', 'pipelines.id']),
# Prevent pipeline/run mismatches
ForeignKeyConstraint(['pipeline_id', 'pipeline_run_id'],
['pipeline_runs.pipeline_id', 'pipeline_runs.id']),
# Prevent run/job mismatches
ForeignKeyConstraint(['pipeline_run_id', 'job_id'],
['jobs.pipeline_run_id', 'jobs.id']),
)
class Checkin(Base):
__tablename__ = 'checkins'
proc_name = Column(Text, primary_key=True)
time = Column(DateTime(timezone=True), nullable=False)
def as_dict(self):
return {
'proc_name': self.proc_name,
'time': self.time.isoformat(),
}
|
|
import argparse
import os
os.environ['THEANO_FLAGS'] = 'floatX=float32,device=gpu0,nvcc.fastmath=True,lib.cnmem=0.85'
import sys
import importlib
from time import strftime
import theano
import theano.tensor as T
import numpy as np
import matplotlib
import cPickle as pickle
matplotlib.use('Agg')
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import log_loss
from sklearn.cross_validation import ShuffleSplit, StratifiedShuffleSplit, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
def load_data(fname, use_cropped=False, as_grey=False):
n = 4543
size = int(fname.split('_')[0])
if use_cropped:
if as_grey:
X_fname = 'cache/X_cropped_grey_%s.npy' % fname
y_fname = 'cache/y_cropped_grey_%s.npy' % fname
else:
X_fname = 'cache/X_cropped_%s.npy' % fname
y_fname = 'cache/y_cropped_%s.npy' % fname
else:
X_fname = 'cache/X_%s.npy' % fname
y_fname = 'cache/y_%s.npy' % fname
    num_channels = 1 if as_grey else 3
X_shape = (n, num_channels, size, size)
y_shape = (n,)
X = np.memmap(X_fname, dtype=np.float32, mode='r', shape=X_shape)
y = np.memmap(y_fname, dtype=np.int32, mode='r', shape=y_shape)
assert X.shape == X_shape
assert y.shape == y_shape
return X, y
def load_mean(fname, use_cropped, as_grey):
if use_cropped:
if as_grey:
mean_fname = 'cache/X_cropped_grey_%s_mean.npy' % fname
else:
mean_fname = 'cache/X_cropped_%s_mean.npy' % fname
else:
mean_fname = 'cache/X_%s_mean.npy' % fname
if os.path.exists(mean_fname):
return np.load(mean_fname)
else:
return None
def filter_by_min_occ(X, y, min_occ):
occs = np.bincount(y)
mask = np.zeros_like(y).astype(bool)
for i, occ in enumerate(occs):
        # keep classes that occur at least min_occ times
        if occ >= min_occ:
mask[y == i] = True
return X[mask], y[mask]
def train_test_split(X, y, test_size=0.25, random_state=42, stratify=True):
if stratify:
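        # Note (added for clarity): stratification is approximated with
        # StratifiedKFold using round(1/test_size) folds; the first fold's test
        # indices below serve as the held-out test set.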
n_folds = int(round(1 / test_size))
sss = StratifiedKFold(y, n_folds=n_folds, random_state=random_state)
else:
sss = ShuffleSplit(len(y), test_size=test_size, random_state=random_state)
train_idx, test_idx = iter(sss).next()
return X[train_idx], X[test_idx], y[train_idx], y[test_idx]
def load_model(fname):
model = importlib.import_module('model_definitions.%s' % fname)
return model
def load_encoder(fname='models/encoder.pkl'):
encoder = pickle.load(open(fname, 'r'))
return encoder
def get_current_time():
return strftime('%Y%m%d_%H%M%S')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True)
parser.add_argument('--model', required=True)
parser.add_argument('--overwrite', action='store_true')
parser.add_argument('--use_cropped', action='store_true')
parser.add_argument('--no_test', action='store_true')
parser.add_argument('--min_occ', type=int, default=None)
parser.add_argument('--as_grey', action='store_true')
parser.add_argument('--no_mean', action='store_true')
parser.add_argument('--continue_training', action='store_true')
args = parser.parse_args()
# log_fname = 'logs/%s.log' % get_current_time()
# print 'Will write logs to %s' % log_fname
print 'args'
print args
print
print 'Loading model: %s' % args.model
model = load_model(args.model)
net = model.net
net.initialize()
print
output_exists = any([
os.path.exists(x) for x in [
model.model_fname, model.model_graph_fname, model.model_history_fname
]
])
if output_exists and not args.overwrite:
print 'Model output exists. Use --overwrite'
sys.exit(1)
print 'Loading data: %s' % args.data
X, y = load_data(args.data, args.use_cropped, args.as_grey)
print X.shape, y.shape
print
# TODO exit if the shapes don't match image_size
if args.min_occ is not None:
        print 'Filtering dataset with min occurrence of %i' % args.min_occ
X, y = filter_by_min_occ(X, y, args.min_occ)
print X.shape, y.shape
print 'WARNING: update the number of units at the final layer to %i' % np.unique(y).shape[0]
print
print 'Loading encoder'
encoder = load_encoder()
# encoder = LabelEncoder().fit(y)
y = encoder.transform(y).astype(np.int32)
print np.unique(y).shape[0]
print y.min(), y.max()
print
print 'Loading mean image'
X_mean = load_mean(args.data, args.use_cropped, args.as_grey)
if not args.no_mean and X_mean is not None:
net.batch_iterator_train.mean = X_mean
net.batch_iterator_test.mean = X_mean
print 'Injected mean image'
else:
print 'Cannot load mean image'
print
if args.continue_training and os.path.exists(model.model_fname):
print 'Loading model params from %s' % model.model_fname
net.load_params_from(model.model_fname)
with open(model.model_history_fname) as f:
net.train_history_ = pickle.load(f)
if not args.no_test:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42, stratify=True)
print 'Train / Test set split'
print X_train.shape, X_train.dtype
print y_train.shape, y_train.dtype
        print X_test.shape, X_test.dtype
print y_test.shape, y_test.dtype
print
print 'Training set images / label: min=%i, max=%i' % (
np.bincount(y_train).min(), np.bincount(y_train).max()
)
print 'Test set images / label: min=%i, max=%i' % (
np.bincount(y_test).min(), np.bincount(y_test).max()
)
net.fit(X_train, y_train)
print 'Loading best param'
net.load_params_from(model.model_fname)
print
print 'Evaluating on test set'
y_test_pred = net.predict(X_test)
y_test_pred_proba = net.predict_proba(X_test)
print
print 'Classification Report'
print '====================='
print classification_report(y_test, y_test_pred)
print
print 'Accuracy Score'
print '=============='
score = accuracy_score(y_test, y_test_pred)
print '%.6f' % score
print
print 'Logloss'
print '======='
logloss = log_loss(y_test, y_test_pred_proba)
print '%.6f' % logloss
print
else:
net.fit(X, y)
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for WithSimilarityScore logic."""
import json
from ggrc import db
from ggrc import models
from ggrc.snapshotter.rules import Types
from integration.ggrc import TestCase
import integration.ggrc.generator
from integration.ggrc.models import factories
class TestWithSimilarityScore(TestCase):
"""Integration test suite for WithSimilarityScore functionality."""
def setUp(self):
super(TestWithSimilarityScore, self).setUp()
self.obj_gen = integration.ggrc.generator.ObjectGenerator()
self.client.get("/login")
@staticmethod
def make_relationships(source, destinations):
for destination in destinations:
factories.RelationshipFactory(
source=source,
destination=destination,
)
@staticmethod
def get_object_snapshot(scope_parent, object_):
# pylint: disable=protected-access
return db.session.query(models.Snapshot).filter(
models.Snapshot.parent_type == scope_parent._inflector.table_singular,
models.Snapshot.parent_id == scope_parent.id,
models.Snapshot.child_type == object_._inflector.table_singular,
models.Snapshot.child_id == object_.id
).one()
def make_scope_relationships(self, source, scope_parent, objects):
"""Create relationships between object and snapshots of provided object"""
snapshots = []
for object_ in objects:
snapshot = self.get_object_snapshot(scope_parent, object_)
snapshots += [snapshot]
self.make_relationships(source, snapshots)
def make_assessments(self, assessment_mappings):
"""Create six assessments and map them to audit, control, objective.
Each of the created assessments is mapped to its own subset of {audit,
control, objective} so each of them has different similarity weight.
Returns: the six generated assessments and their weights in a dict.
"""
assessments = []
for all_mappings in assessment_mappings:
audit = [x for x in all_mappings if x.type == "Audit"][0]
assessment = factories.AssessmentFactory(audit=audit)
mappings = all_mappings[1:]
ordinary_mappings = [x for x in mappings if x.type not in Types.all]
snapshot_mappings = [x for x in mappings if x.type in Types.all]
self.make_relationships(assessment, [audit] + ordinary_mappings)
self.make_scope_relationships(assessment, audit,
snapshot_mappings)
assessments.append(assessment)
return assessments
def test_get_similar_basic(self):
"""Basic check of similar objects manually and via Query API.
We create two programs, map them to the same control, create two audits
and verify that we get the same result manually and via Query API.
"""
program_1 = factories.ProgramFactory(title="Program 1")
program_2 = factories.ProgramFactory(title="Program 2")
control_program_1 = factories.ControlFactory(title="Control 1")
self.make_relationships(
program_1, [
control_program_1,
],
)
self.make_relationships(
program_2, [
control_program_1,
],
)
program_1 = models.Program.query.filter_by(title="Program 1").one()
program_2 = models.Program.query.filter_by(title="Program 2").one()
control_program_1 = models.Control.query.filter_by(title="Control 1").one()
audit_1 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 1",
"program": {"id": program_1.id},
"status": "Planned"
})[1]
audit_2 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 2",
"program": {"id": program_2.id},
"status": "Planned"
})[1]
assessment_mappings = [
[audit_1, control_program_1],
[audit_2, control_program_1],
]
assessments = self.make_assessments(assessment_mappings)
similar_objects = models.Assessment.get_similar_objects_query(
id_=assessments[0].id,
types=["Assessment"],
).all()
expected_ids = {assessments[1].id}
self.assertSetEqual(
{obj.id for obj in similar_objects},
expected_ids,
)
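    # The same check via the Query API: request the ids of Assessments that are
    # "similar" to assessments[0] and compare them with the expected set.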
query = [{
"object_name": "Assessment",
"type": "ids",
"filters": {
"expression": {
"op": {"name": "similar"},
"object_name": "Assessment",
"ids": [str(assessments[0].id)],
},
},
}]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
self.assertSetEqual(
set(json.loads(response.data)[0]["Assessment"]["ids"]),
expected_ids,
)
def test_similar_partially_matching(self):
"""Basic check of similar objects manually and via Query API.
We create three programs, map one them to the two objectives, create two
audits and verify that we get the same result manually and via Query API.
We also ensure that for only single matching objective we do not
fetch that assessment is as related.
"""
# pylint: disable=too-many-locals
program_1 = factories.ProgramFactory(title="Program 1")
program_2 = factories.ProgramFactory(title="Program 2")
program_3 = factories.ProgramFactory(title="Program 3")
objective_1_program_1 = factories.ObjectiveFactory(title="Objective 1")
objective_2_program_1 = factories.ObjectiveFactory(title="Objective 2")
self.make_relationships(
program_1, [
objective_1_program_1,
objective_2_program_1,
],
)
self.make_relationships(
program_2, [
objective_1_program_1,
objective_2_program_1,
],
)
self.make_relationships(
program_3, [
objective_1_program_1,
],
)
program_1 = models.Program.query.filter_by(title="Program 1").one()
program_2 = models.Program.query.filter_by(title="Program 2").one()
program_3 = models.Program.query.filter_by(title="Program 3").one()
objective_1_program_1 = models.Objective.query.filter_by(
title="Objective 1").one()
objective_2_program_1 = models.Objective.query.filter_by(
title="Objective 2").one()
_, audit_1 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 1",
"program": {"id": program_1.id},
"status": "Planned",
})
_, audit_2 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 2",
"program": {"id": program_2.id},
"status": "Planned",
})
_, audit_3 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 3",
"program": {"id": program_3.id},
"status": "Planned",
})
assessment_mappings = [
[audit_1, objective_1_program_1, objective_2_program_1],
[audit_2, objective_1_program_1, objective_2_program_1],
[audit_3],
]
assessments = self.make_assessments(assessment_mappings)
similar_objects = models.Assessment.get_similar_objects_query(
id_=assessments[0].id,
types=["Assessment"],
).all()
expected_ids = {assessments[1].id}
self.assertSetEqual(
{obj.id for obj in similar_objects},
expected_ids,
)
    self.assertNotIn(assessments[2].id, {obj.id for obj in similar_objects})
query = [{
"object_name": "Assessment",
"type": "ids",
"filters": {
"expression": {
"op": {"name": "similar"},
"object_name": "Assessment",
"ids": [str(assessments[0].id)],
},
},
}]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
self.assertSetEqual(
set(json.loads(response.data)[0]["Assessment"]["ids"]),
expected_ids,
)
def test_sort_by_similarity(self):
"""Check sorting by __similarity__ value with query API."""
# pylint: disable=too-many-locals
program_1 = factories.ProgramFactory(title="Program 1")
objective_1_program_1 = factories.ObjectiveFactory(title="Objective 1")
objective_2_program_1 = factories.ObjectiveFactory(title="Objective 2")
control_1_program_1 = factories.ControlFactory(title="Control 1")
control_2_program_1 = factories.ControlFactory(title="Control 2")
self.make_relationships(
program_1, [
objective_1_program_1,
objective_2_program_1,
control_1_program_1,
control_2_program_1
],
)
program_1 = models.Program.query.filter_by(title="Program 1").one()
_, audit_1 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 1",
"program": {"id": program_1.id},
"status": "Planned",
})
_, audit_2 = self.obj_gen.generate_object(models.Audit, {
"title": "Audit 2",
"program": {"id": program_1.id},
"status": "Planned",
})
objective_1_program_1 = models.Objective.query.filter_by(
title="Objective 1").one()
objective_2_program_1 = models.Objective.query.filter_by(
title="Objective 2").one()
control_1_program_1 = models.Control.query.filter_by(
title="Control 1").one()
control_2_program_1 = models.Control.query.filter_by(
title="Control 2").one()
assessment_mappings = [
[audit_1,
objective_1_program_1, objective_2_program_1,
control_1_program_1, control_2_program_1],
[audit_1, control_1_program_1, control_2_program_1],
[audit_1,
objective_1_program_1, control_1_program_1],
[audit_2,
objective_1_program_1, objective_2_program_1,
control_1_program_1, control_2_program_1],
[audit_2,
objective_1_program_1, control_1_program_1],
[audit_2, control_1_program_1, control_2_program_1],
]
weights = [
[4, 4, 4, 4, 8],
[2, 2, 4, 4, 4],
[2, 2, 4, 4, 4],
[4, 4, 4, 4, 8],
[2, 2, 4, 4, 4],
[2, 2, 4, 4, 4],
]
assessments = self.make_assessments(
assessment_mappings)
assessment_ids = [ass.id for ass in assessments]
for aid, weight_defs in zip(assessment_ids, weights):
similar_objects = models.Assessment.get_similar_objects_query(
id_=aid,
types=["Assessment"],
).all()
sorted_similar = sorted(similar_objects,
key=lambda x: x.weight)
self.assertEqual(
weight_defs,
[x.weight for x in sorted_similar]
)
query = [{
"object_name": "Assessment",
"type": "ids",
"order_by": [{"name": "__similarity__"}],
"filters": {
"expression": {
"op": {"name": "similar"},
"object_name": "Assessment",
"ids": [str(aid)],
},
},
}]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
# our sorted results are only unstably sorted. As such we verify that
# weights match and not actual object ids
obj_weight = {so.id: so.weight for so in similar_objects}
response_ids = json.loads(response.data)[0]["Assessment"]["ids"]
response_weights = [obj_weight[rid] for rid in response_ids]
self.assertListEqual(
response_weights,
[obj.weight for obj in sorted_similar],
)
def test_empty_similar_results(self):
"""Check empty similarity result."""
query = [{
"object_name": "Assessment",
"type": "ids",
"filters": {
"expression": {
"op": {"name": "similar"},
"object_name": "Assessment",
"ids": ["-1"],
},
},
}]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
self.assertListEqual(
response.json[0]["Assessment"]["ids"],
[],
)
def test_invalid_sort_by_similarity(self):
"""Check sorting by __similarity__ with query API when it is impossible."""
# no filter by similarity but order by similarity
query = [{
"object_name": "Assessment",
"order_by": [{"name": "__similarity__"}],
"filters": {"expression": {}},
}]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
self.assert400(response)
self.assertEqual(response.json["message"],
"Can't order by '__similarity__' when no 'similar' "
"filter was applied.")
# filter by similarity in one query and order by similarity in another
query = [
{
"object_name": "Assessment",
"filters": {
"expression": {
"op": {"name": "similar"},
"object_name": "Assessment",
"ids": [1],
},
},
},
{
"object_name": "Assessment",
"order_by": [{"name": "__similarity__"}],
"filters": {"expression": {}},
},
]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
self.assert400(response)
def test_asmt_issue_similarity(self):
"""Test Issues related to assessments."""
audit = factories.AuditFactory()
assessment1 = factories.AssessmentFactory(audit=audit)
assessment2 = factories.AssessmentFactory(audit=audit)
issue = factories.IssueFactory(audit=audit)
control = factories.ControlFactory()
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.type,
revision_id=models.Revision.query.filter_by(
resource_type=control.type).one().id
)
factories.RelationshipFactory(source=audit, destination=assessment1)
factories.RelationshipFactory(source=audit, destination=assessment2)
factories.RelationshipFactory(source=audit, destination=issue)
factories.RelationshipFactory(source=snapshot, destination=assessment1)
factories.RelationshipFactory(source=snapshot, destination=issue)
query = [{
"object_name": "Issue",
"type": "ids",
"filters": {
"expression": {
"op": {"name": "similar"},
"object_name": "Assessment",
"ids": [assessment1.id],
},
},
}]
expected_ids = [issue.id]
response = self.client.post(
"/query",
data=json.dumps(query),
headers={"Content-Type": "application/json"},
)
self.assertListEqual(
response.json[0]["Issue"]["ids"],
expected_ids
)
|
|
import unittest
import os
from sqltxt.table import Table
from sqltxt.query import Query, condition_applies, stage_columns, stage_conditions
from sqltxt.column import Column, ColumnName, AmbiguousColumnNameError
from sqltxt.expression import Expression, AndList, OrList
import subprocess
class QueryTest(unittest.TestCase):
def setUp(self):
# TODO: replace this hack to make sure test files are found with fixtures
if 'tests/data' not in os.getcwd():
os.chdir(os.path.join(os.getcwd(), 'tests/data'))
table_header = ["col_a", "col_b"]
table_contents = """1,1\n2,3\n3,2"""
self.table_a = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
table_header = ["col_a", "col_z"]
table_contents = """1,w\n2,x\n2,y\n5,z"""
self.table_b = Table.from_cmd(
name = 'table_b',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
def test_condition_applies(self):
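        # condition_applies should hold only when every column referenced by the
        # condition can be resolved against the tables passed in.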
condition = AndList([
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_a.col_b', '==', 'table_b.col_a')
])
self.assertTrue(condition_applies(condition, self.table_a, self.table_b))
condition = OrList([
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_a.col_b', '==', 'table_b.col_a')
])
self.assertTrue(condition_applies(condition, self.table_a, self.table_b))
condition = OrList([
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_a.col_b', '==', 'table_b.col_a')
])
self.assertFalse(condition_applies(condition, self.table_a))
condition = AndList([
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_c.col_b', '==', 'table_b.col_a')
])
self.assertFalse(condition_applies(condition, self.table_a, self.table_b))
condition = OrList([
Expression('table_a.col_a', '==', 'table_b.col_j'),
Expression('table_a.col_b', '==', 'table_b.col_a')
])
self.assertFalse(condition_applies(condition, self.table_a, self.table_b))
condition = AndList([
Expression('table_a.col_a', '==', '1'),
Expression('table_a.col_b', '==', 'table_b.col_a')
])
self.assertTrue(condition_applies(condition, self.table_a, self.table_b))
with self.assertRaises(AmbiguousColumnNameError):
condition = AndList([
Expression('table_a.col_a', '==', '1'),
Expression('table_a.col_b', '==', 'col_a')
])
condition_applies(condition, self.table_a, self.table_b)
def test_stage_columns(self):
column_names = [
ColumnName('table_a.col_a'),
ColumnName('table_b.col_a'),
ColumnName('col_z'),
ColumnName('col_b'),
]
expected_stages = [
[ColumnName('table_a.col_a'), ColumnName('col_b'), ],
[ColumnName('table_b.col_a'), ColumnName('col_z'), ]
]
actual_stages = stage_columns([self.table_a, self.table_b], column_names)
self.assertEqual(expected_stages, actual_stages)
with self.assertRaises(AmbiguousColumnNameError):
column_names = [ColumnName('col_a'), ]
actual_stages = stage_columns([self.table_a, self.table_b], column_names)
def test_stage_conditions(self):
conditions = [
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_a.col_a', '==', 'table_a.col_b'),
OrList([
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_a.col_a', '==', 'table_a.col_b')
]),
]
expected_condition_order = [
[Expression('table_a.col_a', '==', 'table_a.col_b')],
[
Expression('table_a.col_a', '==', 'table_b.col_z'),
OrList([
Expression('table_a.col_a', '==', 'table_b.col_z'),
Expression('table_a.col_a', '==', 'table_a.col_b')
])
],
]
actual_condition_order = stage_conditions([self.table_a, self.table_b], conditions)
self.assertEqual(expected_condition_order, actual_condition_order)
def test_select(self):
query = Query(
[{'path': 'table_a.txt', 'alias': 'table_a.txt'}],
columns=['col_b']
)
table_actual = query.execute()
table_expected = Table.from_cmd(
name = 'expected',
cmd = 'echo -e "1\n3\n2"',
columns = ["col_b"]
)
table_expected_out = subprocess.check_output(['/bin/bash', '-c', table_expected.get_cmd_str(output_column_names=True)])
table_actual_out = subprocess.check_output(['/bin/bash', '-c', table_actual.get_cmd_str(output_column_names=True)])
self.assertEqual(table_actual_out, table_expected_out)
def test_where(self):
query = Query(
[{'path': 'table_a.txt', 'alias': 'table_a.txt'}],
conditions=[['col_b', '<', '3'], 'or', ['col_b', '<', '3']],
columns=['col_a']
)
table_actual = query.execute()
table_expected = Table.from_cmd(
'expected',
cmd = 'echo -e "1\n3"',
columns = ['col_a']
)
table_expected_out = subprocess.check_output(['/bin/bash', '-c', table_expected.get_cmd_str(output_column_names=True)])
table_actual_out = subprocess.check_output(['/bin/bash', '-c', table_actual.get_cmd_str(output_column_names=True)])
self.assertEqual(table_actual_out, table_expected_out)
def test_join_columns(self):
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'table_b.txt'}
],
conditions=[ ['table_a.txt.col_a', '==', 'table_b.txt.col_a'], ],
columns=['table_a.txt.col_a', 'col_b', 'col_z']
)
t = query.execute()
header_actual = t.columns
header_expected = ['col_a', 'col_b', 'col_z']
self.assertEqual([str(col) for col in header_actual], header_expected)
def test_join_two_tables(self):
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'table_b.txt'}
],
conditions=[ ['table_a.txt.col_a', '==', 'table_b.txt.col_a'], ],
columns=['table_a.txt.col_a', 'col_b', 'col_z']
)
table_actual = query.execute()
table_expected = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e "1,1,w\n2,3,x\n2,3,y"',
columns = ['col_a','col_b','col_z']
)
table_expected_out = subprocess.check_output(['/bin/bash', '-c', table_expected.get_cmd_str(output_column_names=True)])
table_actual_out = subprocess.check_output(['/bin/bash', '-c', table_actual.get_cmd_str(output_column_names=True)])
self.assertEqual(table_actual_out, table_expected_out)
def test_join_two_tables_with_sort(self):
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'table_b.txt'}
],
conditions=[ ['table_a.txt.col_b', '==', 'table_b.txt.col_a'], ],
columns=['col_b', 'table_b.txt.col_a', 'col_z']
)
table_actual = query.execute()
cmd_actual = table_actual.get_cmd_str(output_column_names=True)
cmd_expected = \
'echo "col_b,col_a,col_z"; ' + \
"join -t, -1 2 -2 1 <(tail -n+2 table_a.txt | sort -t, -k 2,2) <(tail -n+2 table_b.txt | sort -t, -k 1,1) | awk -F\',\' \'OFS=\",\" { print $1,$1,$3 }\'"
assert cmd_actual == cmd_expected
table_actual_out = subprocess.check_output(['/bin/bash', '-c', cmd_actual])
table_expected_out = subprocess.check_output(['/bin/bash', '-c', cmd_expected])
self.assertEqual(table_actual_out, table_expected_out)
def test_join_two_tables_with_multiple_join_conditions(self):
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_d.txt', 'alias': 'table_d.txt'}
],
conditions=[
['table_a.txt.col_a', '==', 'table_d.txt.col_a'], 'and',
['table_a.txt.col_b', '==', 'table_d.txt.col_b'],
],
columns=['table_a.txt.col_b', 'table_a.txt.col_a', 'col_x']
)
table_actual = query.execute()
cmd_actual = table_actual.get_cmd_str(output_column_names=True)
cmd_expected = \
'echo "col_b,col_a,col_x"; ' + \
"join -t, -1 1 -2 1 <(tail -n+2 table_d.txt | sort -t, -k 1,1) <(tail -n+2 table_a.txt | sort -t, -k 1,1) | awk -F\',\' \'OFS=\",\" { if ($4 == $2) { print $1,$2,$3,$4 } }\' | awk -F\',\' \'OFS=\",\" { print $4,$1,$3 }\'"
assert cmd_actual == cmd_expected
table_actual_out = subprocess.check_output(['/bin/bash', '-c', cmd_actual])
table_expected_out = subprocess.check_output(['/bin/bash', '-c', cmd_expected])
self.assertEqual(table_actual_out, table_expected_out)
def test_join_three_tables(self):
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'table_b.txt'},
{'path': 'table_d.txt', 'alias': 'table_d.txt'}
],
conditions=[
['table_a.txt.col_a', '==', 'table_d.txt.col_a'], 'and',
['table_a.txt.col_a', '==', 'table_b.txt.col_a'],
],
columns=['col_z', 'table_a.txt.col_a', 'col_x']
)
table_actual = query.execute()
cmd_actual = table_actual.get_cmd_str(output_column_names=True)
cmd_expected = \
'echo "col_z,col_a,col_x"; ' + \
'join -t, -1 1 -2 1 ' + \
'<(join -t, -1 1 -2 1 ' + \
'<(tail -n+2 table_d.txt | sort -t, -k 1,1) ' + \
'<(tail -n+2 table_a.txt | sort -t, -k 1,1) | sort -t, -k 1,1) ' + \
'<(tail -n+2 table_b.txt | sort -t, -k 1,1) ' + \
'| awk -F\',\' \'OFS="," { print $5,$1,$3 }\''
assert cmd_actual == cmd_expected
table_actual_out = subprocess.check_output(['/bin/bash', '-c', cmd_actual])
table_expected_out = subprocess.check_output(['/bin/bash', '-c', cmd_expected])
self.assertEqual(table_actual_out, table_expected_out)
def test_wildcard_selects_all_columns(self):
query = Query(
[{'path': 'table_a.txt', 'alias': 'table_a.txt'}],
columns=['*']
)
table_actual = query.execute()
table_expected = Table.from_cmd(
name = 'expected',
cmd = 'echo -e "1,1\n2,3\n3,2"',
columns = ["col_a", "col_b"]
)
table_expected_out = subprocess.check_output(['/bin/bash', '-c', table_expected.get_cmd_str(output_column_names=True)])
table_actual_out = subprocess.check_output(['/bin/bash', '-c', table_actual.get_cmd_str(output_column_names=True)])
self.assertEqual(table_actual_out, table_expected_out)
def test_qualified_wildcard_selects_all_table_columns_for_table_qualifier(self):
query = Query(
[{'path': 'table_a.txt', 'alias': 'table_a.txt'}],
columns=['table_a.txt.*']
)
table_actual = query.execute()
table_expected = Table.from_cmd(
name = 'expected',
cmd = 'echo -e "1,1\n2,3\n3,2"',
columns = ["col_a", "col_b"]
)
table_expected_out = subprocess.check_output(['/bin/bash', '-c', table_expected.get_cmd_str(output_column_names=True)])
table_actual_out = subprocess.check_output(['/bin/bash', '-c', table_actual.get_cmd_str(output_column_names=True)])
self.assertEqual(table_actual_out, table_expected_out)
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'table_b.txt'}
],
conditions=[ ['table_a.txt.col_a', '==', 'table_b.txt.col_a'], ],
columns=['table_a.txt.*']
)
t = query.execute()
header_actual = t.columns
header_expected = ['col_a', 'col_b']
self.assertEqual([str(col) for col in header_actual], header_expected)
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'tb'}
],
conditions=[ ['table_a.txt.col_a', '==', 'table_b.txt.col_a'], ],
columns=['tb.*']
)
t = query.execute()
header_actual = t.columns
header_expected = ['col_a', 'col_z']
self.assertEqual([str(col) for col in header_actual], header_expected)
def test_multiple_wildcards_result_in_duplicate_columns(self):
query = Query(
[
{'path': 'table_a.txt', 'alias': 'table_a.txt'},
{'path': 'table_b.txt', 'alias': 'tb'}
],
conditions=[ ['table_a.txt.col_a', '==', 'tb.col_a'], ],
columns=['table_a.txt.col_a', 'tb.*', '*']
)
t = query.execute()
header_actual = t.columns
header_expected = ['col_a', 'col_a', 'col_z', 'col_a', 'col_b', 'col_a', 'col_z', ]
self.assertEqual([str(col) for col in header_actual], header_expected)
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import exc as heat_exc
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service import engine as e
from sahara.service.heat import templates as ht
from sahara.service import volumes
from sahara.utils import cluster as c_u
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils.openstack import base as b
from sahara.utils.openstack import heat
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CREATE_STAGES = [c_u.CLUSTER_STATUS_SPAWNING, c_u.CLUSTER_STATUS_WAITING,
c_u.CLUSTER_STATUS_PREPARING]
SCALE_STAGES = [c_u.CLUSTER_STATUS_SCALING_SPAWNING,
c_u.CLUSTER_STATUS_SCALING_WAITING,
c_u.CLUSTER_STATUS_SCALING_PREPARING]
ROLLBACK_STAGES = [c_u.CLUSTER_STATUS_ROLLBACK_SPAWNING,
c_u.CLUSTER_STATUS_ROLLBACK_WAITING,
c_u.CLUSTER_STATUS_ROLLBACK__PREPARING]
class HeatEngine(e.Engine):
def get_type_and_version(self):
return "heat.3.0"
def create_cluster(self, cluster):
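        # Record the desired node-group counts, zero them out in the DB, and let
        # _launch_instances grow the cluster to the target counts via the Heat stack.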
self._update_rollback_strategy(cluster, shutdown=True)
target_count = self._get_ng_counts(cluster)
self._nullify_ng_counts(cluster)
self._launch_instances(cluster, target_count, CREATE_STAGES)
self._update_rollback_strategy(cluster)
def _get_ng_counts(self, cluster):
count = {}
for node_group in cluster.node_groups:
count[node_group.id] = node_group.count
return count
def _nullify_ng_counts(self, cluster):
ctx = context.ctx()
for node_group in cluster.node_groups:
conductor.node_group_update(ctx, node_group, {"count": 0})
def scale_cluster(self, cluster, target_count):
ctx = context.ctx()
rollback_count = self._get_ng_counts(cluster)
self._update_rollback_strategy(cluster, rollback_count=rollback_count,
target_count=target_count)
inst_ids = self._launch_instances(
cluster, target_count, SCALE_STAGES,
update_stack=True, disable_rollback=False)
cluster = conductor.cluster_get(ctx, cluster)
c_u.clean_cluster_from_empty_ng(cluster)
self._update_rollback_strategy(cluster)
return inst_ids
def rollback_cluster(self, cluster, reason):
rollback_info = cluster.rollback_info or {}
self._update_rollback_strategy(cluster)
if rollback_info.get('shutdown', False):
self._rollback_cluster_creation(cluster, reason)
LOG.warning(_LW("Cluster creation rollback "
"(reason: {reason})").format(reason=reason))
return False
rollback_count = rollback_info.get('rollback_count', {}).copy()
target_count = rollback_info.get('target_count', {}).copy()
if rollback_count or target_count:
self._rollback_cluster_scaling(
cluster, rollback_count, target_count, reason)
LOG.warning(_LW("Cluster scaling rollback "
"(reason: {reason})").format(reason=reason))
return True
return False
def _update_rollback_strategy(self, cluster, shutdown=False,
rollback_count=None, target_count=None):
rollback_info = {}
if shutdown:
rollback_info['shutdown'] = shutdown
if rollback_count:
rollback_info['rollback_count'] = rollback_count
if target_count:
rollback_info['target_count'] = target_count
cluster = conductor.cluster_update(
context.ctx(), cluster, {'rollback_info': rollback_info})
return cluster
def _populate_cluster(self, cluster, stack):
ctx = context.ctx()
old_ids = [i.instance_id for i in c_u.get_instances(cluster)]
new_ids = []
for node_group in cluster.node_groups:
instances = stack.get_node_group_instances(node_group)
for instance in instances:
nova_id = instance['physical_id']
name = instance['name']
if nova_id not in old_ids:
instance_id = conductor.instance_add(
ctx, node_group, {"instance_id": nova_id,
"instance_name": name})
new_ids.append(instance_id)
return new_ids
def _rollback_cluster_creation(self, cluster, ex):
"""Shutdown all instances and update cluster status."""
self.shutdown_cluster(cluster)
def _rollback_cluster_scaling(self, cluster, rollback_count,
target_count, ex):
"""Attempt to rollback cluster scaling.
Our rollback policy for scaling is as follows:
We shut down nodes created during scaling, but we don't try
to get back decommissioned nodes. I.e. during the rollback
we only shut down nodes and do not launch any. That approach should
maximize the chance of rollback success.
"""
for ng in rollback_count:
if rollback_count[ng] > target_count[ng]:
rollback_count[ng] = target_count[ng]
self._launch_instances(cluster, rollback_count, ROLLBACK_STAGES,
update_stack=True)
def shutdown_cluster(self, cluster):
"""Shutdown specified cluster and all related resources."""
try:
b.execute_with_retries(heat.client().stacks.delete, cluster.name)
stack = heat.get_stack(cluster.name)
heat.wait_stack_completion(stack)
except heat_exc.HTTPNotFound:
LOG.warning(_LW('Did not find stack for cluster. Trying to delete '
'cluster manually.'))
# Stack not found. Trying to delete cluster like direct engine
# do it
self._shutdown_instances(cluster)
self._delete_aa_server_group(cluster)
self._clean_job_executions(cluster)
self._remove_db_objects(cluster)
@cpo.event_wrapper(
True, step=_('Create Heat stack'), param=('cluster', 1))
def _create_instances(self, cluster, target_count, update_stack=False,
disable_rollback=True):
stack = ht.ClusterStack(cluster)
self._update_instance_count(stack, cluster, target_count)
stack.instantiate(update_existing=update_stack,
disable_rollback=disable_rollback)
heat.wait_stack_completion(stack.heat_stack)
return self._populate_cluster(cluster, stack)
def _launch_instances(self, cluster, target_count, stages,
update_stack=False, disable_rollback=True):
# create all instances
cluster = c_u.change_cluster_status(cluster, stages[0])
inst_ids = self._create_instances(
cluster, target_count, update_stack, disable_rollback)
# wait until all instances are up and networks are ready
cluster = c_u.change_cluster_status(cluster, stages[1])
instances = c_u.get_instances(cluster, inst_ids)
self._await_networks(cluster, instances)
# prepare all instances
cluster = c_u.change_cluster_status(cluster, stages[2])
instances = c_u.get_instances(cluster, inst_ids)
volumes.mount_to_instances(instances)
self._configure_instances(cluster)
return inst_ids
def _update_instance_count(self, stack, cluster, target_count):
ctx = context.ctx()
for node_group in cluster.node_groups:
count = target_count[node_group.id]
stack.add_node_group_extra(node_group.id, count,
self._generate_user_data_script)
# if the number of instances decreases, we need to drop
# the excess ones
for i in range(count, node_group.count):
conductor.instance_remove(ctx, node_group.instances[i])
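# Illustrative sketch (not part of the engine, never called): the shape of the
# rollback_info dictionary that _update_rollback_strategy() persists and that
# rollback_cluster() consumes. The node-group ids and counts are assumptions
# made up for the example.
def _example_rollback_info_shapes():
    # create_cluster() requests a full shutdown rollback up front
    create_info = {'shutdown': True}
    # scale_cluster() stores the counts needed to undo a failed scaling
    scale_info = {
        'rollback_count': {'ng-id-1': 3},  # node-group counts before scaling
        'target_count': {'ng-id-1': 5},    # counts requested by the user
    }
    return create_info, scale_info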
|
|
#!/usr/bin/python
# TODO - python path - assuming conda here
## HEALTH WARNING - BETA CODE IN DEVELOPMENT ##
'''
This standalone application will build a mesh from a nifti classification file.
To keep the procedure as similar as possible to the way mrMesh used to do this,
we will keep this as a standalone application. Matlab reads in the segmented
nifti file using vistasoft's own nifti class handler meshBuild>mrmBuild>
meshBuildFromClass - we just don't use the old build_mesh mex file - we do that
bit and any smoothing in this application and send a mesh structure back to
matlab.
AG 2017
'''
import os,sys
import scipy
import vtk
from numpy import *
from scipy.io import loadmat, savemat
from vtk.util import numpy_support
debug = True
#TODO error handling
fileToLoad = sys.argv[1]
fileToSave = sys.argv[2]
# load the voxel data that has been dumped to disk
voxels = scipy.io.loadmat(fileToLoad)
mmPerVox = voxels['mmPerVox'][0]
if debug: print mmPerVox
voxels = voxels['voxels'] #unpack
if debug: print voxels
if debug: print shape(voxels)
extent = shape(voxels)
if debug: print extent
if debug: print extent[0]
if debug: print extent[1]
if debug: print extent[2]
'''
### ------------------------------------------------------------------------------
### this is faster but for now exactly replicate the way mrMesh sets up the volume array
# import voxels to vtk
dataImporter = vtk.vtkImageImport()
data_string = voxels.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetDataExtent(0, extent[2]-1, 0, extent[1]-1, 0, extent[0]-1) # TODO have to work this out
dataImporter.SetWholeExtent(0, extent[2]-1, 0, extent[1]-1, 0, extent[0]-1) # TODO have to work this out
dataImporter.SetDataSpacing(mmPerVox[0],mmPerVox[1],mmPerVox[2]) # TODO have to work this out
dataImporter.Update()
if debug: print dataImporter.GetOutput()
### ------------------------------------------------------------------------------
'''
### ------- the way mrMesh did it in mesh_build --------------------------------
pArray = map(ord,voxels.tostring()) #unpack
pDims = shape(voxels)
scale = mmPerVox
iSizes = [pDims[0]+2, pDims[1]+2, pDims[2]+2]
nTotalValues = iSizes[0] * iSizes[1] * iSizes[2]
pClassValues = vtk.vtkUnsignedCharArray()
pClassValues.SetNumberOfValues(nTotalValues)
pClassData = vtk.vtkStructuredPoints()
pClassData.SetDimensions(iSizes[0], iSizes[1], iSizes[2])
pClassData.SetOrigin(-scale[0], -scale[1], -scale[2]) #???
pClassData.SetOrigin(-1, -1, -1) #???
pClassData.SetSpacing(scale[0], scale[1], scale[2])
for iSrcZ in range(pDims[2]):
for iSrcY in range(pDims[1]):
iSrcIndex = iSrcZ * pDims[1] * pDims[0] + iSrcY * pDims[0]
iDstIndex = (iSrcZ+1) * iSizes[1] * iSizes[0] + (iSrcY+1) * iSizes[0] + 1
for iSrcX in range(pDims[0]):
fTemp = int(pArray[iSrcIndex])
#if debug: print fTemp, 'iSrcIndex', iSrcIndex, 'iDstIndex', iDstIndex
if fTemp>0:
pClassValues.SetValue(iDstIndex, 0)
else:
pClassValues.SetValue(iDstIndex, 1)
iSrcIndex+=1
iDstIndex+=1
pClassData.GetPointData().SetScalars(pClassValues)
pClassData.Modified()
if debug:
spw = vtk.vtkStructuredPointsWriter()
spw.SetFileTypeToASCII()
spw.SetInputData(pClassData)
spw.SetFileName("/tmp/test-mrMeshPy-structuredPoints.vtk")
spw.Write()
spw.Update()
### ------ Data volume is loaded and constructed - extract some surfaces -------------
mc = vtk.vtkMarchingCubes()
# mc = vtk.vtkContourFilter() #- could use a contour filter instead?
# mc.SetInputConnection(dataImporter.GetOutputPort()) # later - for use with direct imagedata import
mc.SetInputData(pClassData)
mc.SetValue(0,0.5) #extract 0-th surface at 0.5?
mc.ComputeGradientsOff()
mc.ComputeNormalsOff()
mc.ComputeScalarsOff()
mc.Update()
if debug:
print mc.GetOutput()
write = vtk.vtkPolyDataWriter()
write.SetFileName('/tmp/test-mrMeshPy-marchingCubesOutput.txt')
write.SetFileTypeToASCII()
write.SetInputData(mc.GetOutput())
write.Write()
write.Update()
# ---- To exactly replicate the mrMesh routines, we also need to reverse the triangles (??)
# it's unclear why they did this but we will do it because they did
# it's a little tricky in vtk so we pull the vertex indices out of the vtk cell array
# and then rebuild it after shuffling the order
polyDat = mc.GetOutput()
theCellArray = polyDat.GetPolys()
numpyCellValues = numpy_support.vtk_to_numpy(theCellArray.GetData()) #convert to numpy
if debug: print(numpyCellValues)
nar = numpyCellValues.reshape(theCellArray.GetNumberOfCells(),4) # reshape to n x 4
nar2 = zeros(nar.shape) # create a temporary holder
nar2[:,0]=nar[:,0] # keep col 1 in place (always the number 3)
nar2[:,1]=nar[:,3] # swap vertex 3 with 1
nar2[:,2]=nar[:,2] # keep vertex 2 in place
nar2[:,3]=nar[:,1] # swap vertex 1 with 3
#triangles now reversed
nar2r = nar2.reshape(1,theCellArray.GetNumberOfCells()*4) # reshape to a vector
# overwrite the original values in the cell array
for i in range(theCellArray.GetNumberOfCells()*4):
theCellArray.ReplaceCell(i-1,1,[int(nar2r[0][i])]) #TODO why off by one?
na = numpy_support.vtk_to_numpy(theCellArray.GetData())
if debug: print(na)
polyDat.Modified()
# ---- extract just "center surface" - edges are normally extracted too (the cube around the edge of the volume) --------
confilter = vtk.vtkPolyDataConnectivityFilter()
confilter.SetInputData(polyDat)
confilter.SetExtractionModeToClosestPointRegion()
confilter.SetClosestPoint(extent[0]/2.0,extent[1]/2.0,extent[2]/2.0) # center of volume
confilter.Update()
# ---- Normals ---------------------
# note: ComputeNormalsOff() was set on the marching cubes filter above, so point normals are computed here
normals = vtk.vtkPolyDataNormals()
normals.ComputePointNormalsOn()
normals.SplittingOff()
normals.SetInputConnection(confilter.GetOutputPort())
######normals.SetInputData(polyDat)
normals.Update()
print normals.GetOutput()
norm = normals.GetOutput().GetPointData().GetNormals()
output_normals = array(numpy_support.vtk_to_numpy(norm).transpose(),'d')
####if debug: print output_normals
# ---- Initial vertices - unsmoothed ---------------------
init_verts = normals.GetOutput().GetPoints().GetData()
output_init_verts = array(numpy_support.vtk_to_numpy(init_verts).transpose(),'d')
if debug: print output_init_verts
# ---- Polys (triangles) ---------------------
triangles = normals.GetOutput().GetPolys().GetData()
tmp_triangles = numpy_support.vtk_to_numpy(triangles)
# N.B. the polygon data returned here have 4 values per poly - the first is the number
# of vertices that describe the polygon (ironically always 3) and the next 3 are the
# indices of the vertices that make up the polygon
# so first we need to reshape data from a vector
tmp_triangles = reshape(tmp_triangles,(len(tmp_triangles)/4,4))
# and then we drop the first column (all 3's)
output_triangles = array((tmp_triangles[:,1:4]).transpose(),'d') #remember zero index here, add one for matlab
if debug: print output_triangles
# -------- smoothed version of mesh ----------------
smooth = vtk.vtkSmoothPolyDataFilter()
smooth.SetNumberOfIterations(32) #standard value used in old mrMesh
smooth.SetRelaxationFactor(0.5) #standard value used in old mrMesh
smooth.FeatureEdgeSmoothingOff()
smooth.SetFeatureAngle(45)
smooth.SetEdgeAngle(15)
smooth.SetBoundarySmoothing(1)
smooth.SetInputConnection(normals.GetOutputPort())
smooth.Update()
# different smoothing option?
'''
smooth = vtk.vtkWindowedSincPolyDataFilter()
smooth.SetInputConnection(mc.GetOutputPort())
smooth.SetNumberOfIterations(30)
smooth.SetPassBand(0.5)
# different smoothing option?
smooth.SetFeatureAngle(45)
smooth.SetEdgeAngle(15)
smooth.SetBoundarySmoothing(1)
smooth.SetFeatureEdgeSmoothing(0)
smooth.Update()
'''
#### NEW NORMALS !!
norm = smooth.GetOutput().GetPointData().GetNormals()
output_normals = array(numpy_support.vtk_to_numpy(norm).transpose(),'d')
# ---- Vertices - smoothed ---------------------
smooth_verts = smooth.GetOutput().GetPoints().GetData()
output_smooth_verts = array(numpy_support.vtk_to_numpy(smooth_verts).transpose(),'d')
if debug: print output_smooth_verts
# ---- Curvature ---------------------
curvature = vtk.vtkCurvatures()
curvature.SetInputConnection(smooth.GetOutputPort())
curvature.SetCurvatureTypeToMean()
curvature.Update()
curv = curvature.GetOutput().GetPointData().GetScalars()
output_curvature = array(numpy_support.vtk_to_numpy(curv).transpose(),'d')
if debug: print min(output_curvature)
if debug: print max(output_curvature)
if debug: print output_curvature
# -------- colours based on curvature ------------
# turn curvature into color
tmp_colors = output_curvature.copy()
#min_curv = min(tmp_colors)
#max_curv = max(tmp_colors)
#tmp_colors = (tmp_colors -min_curv) / (max_curv-min_curv) *255
tmp_colors[tmp_colors>=0] = 85 #standard value used in old mrMesh
tmp_colors[tmp_colors<0] = 160 #standard value used in old mrMesh
output_colors = vstack((tmp_colors, tmp_colors, tmp_colors, ones((1,len(tmp_colors)))*255))
output_colors = array(output_colors,'d')
if debug: print output_colors
# OK we have all the data we need now, lets write it out to file
data = {} #empty dictionary
data['initVertices'] = output_init_verts
data['initialvertices'] = output_init_verts
data['vertices'] = output_smooth_verts
data['colors'] = output_colors
data['normals'] = output_normals
data['triangles'] = output_triangles
data['curvature'] = output_curvature
# save it out
savemat(fileToSave,data)
# data have been sent, but let's view them here
pdm = vtk.vtkPolyDataMapper()
pdm.SetInputConnection(confilter.GetOutputPort())
######pdm.SetInputData(normals.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(pdm)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
ren.SetBackground(1,1,1)
renWin.SetSize(500,500)
iren.Initialize()
iren.Start()
pdm = vtk.vtkPolyDataMapper()
pdm.SetInputConnection(curvature.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(pdm)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
ren.SetBackground(1,1,1)
renWin.SetSize(500,500)
iren.Initialize()
iren.Start()
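# Example invocation (illustrative; the script and file names below are
# assumptions):
#   python mrMeshPy_build.py /tmp/voxels_in.mat /tmp/mesh_out.mat
# The input .mat file must contain 'voxels' (the class volume) and 'mmPerVox'
# (the 1x3 voxel size); the output .mat receives the 'initVertices',
# 'vertices', 'triangles', 'normals', 'colors' and 'curvature' arrays written
# by savemat() above.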
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
import filecmp
import os
import shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator),) + paths)
return _join
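# For example (illustrative), _gen_fake_join('/')('/tmp/path/', 'to', 'file')
# returns '/tmp/path/to/file', which is the behaviour given to the mocked
# os.path.join in the Unix-path tests below.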
class FileSystemsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_get_scheme(self):
self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
self.assertIsNone(FileSystems.get_scheme('c:\\abc\cdf')) # pylint: disable=anomalous-backslash-in-string
self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')
def test_get_filesystem(self):
self.assertTrue(isinstance(FileSystems.get_filesystem('/tmp'),
localfilesystem.LocalFileSystem))
self.assertTrue(isinstance(FileSystems.get_filesystem('c:\\abc\def'), # pylint: disable=anomalous-backslash-in-string
localfilesystem.LocalFileSystem))
with self.assertRaises(ValueError):
FileSystems.get_filesystem('error://abc/def')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_join(self, *unused_mocks):
# Test joining of Unix paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/tmp/path', 'to', 'file'))
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/tmp/path', 'to/file'))
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/', 'tmp/path', 'to/file'))
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/tmp/', 'path', 'to/file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_join(self, *unused_mocks):
# Test joining of Windows paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
self.assertEqual(r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path', 'to', 'file'))
self.assertEqual(r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path', r'to\file'))
self.assertEqual(r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))
def test_mkdirs(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
self.assertTrue(os.path.isdir(path))
def test_mkdirs_failed(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
# Check that an IOError is raised if the directory already exists
with self.assertRaises(IOError):
FileSystems.mkdirs(path)
with self.assertRaises(IOError):
FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))
def test_match_file(self):
path = os.path.join(self.tmpdir, 'f1')
open(path, 'a').close()
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path])
def test_match_file_empty(self):
path = os.path.join(self.tmpdir, 'f2') # Does not exist
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_exception(self):
# Match files with None so that it throws an exception
with self.assertRaises(BeamIOError) as error:
FileSystems.match([None])
self.assertTrue(
error.exception.message.startswith('Unable to get the Filesystem'))
self.assertEqual(error.exception.exception_details.keys(), [None])
def test_match_directory_with_files(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
open(path1, 'a').close()
open(path2, 'a').close()
# Match both the files in the directory
path = os.path.join(self.tmpdir, '*')
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path1, path2])
def test_match_directory(self):
result = FileSystems.match([self.tmpdir])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.tmpdir])
def test_copy(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path1], [path2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_copy_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaises(BeamIOError) as error:
FileSystems.copy([path1], [path2])
self.assertTrue(
error.exception.message.startswith('Copy operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [(path1, path2)])
def test_copy_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
FileSystems.mkdirs(path_t2)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path_t1], [path_t2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_rename(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path1], [path2])
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_rename_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaises(BeamIOError) as error:
FileSystems.rename([path1], [path2])
self.assertTrue(
error.exception.message.startswith('Rename operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [(path1, path2)])
def test_rename_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path_t1], [path_t2])
self.assertTrue(FileSystems.exists(path_t2))
self.assertFalse(FileSystems.exists(path_t1))
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_exists(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
self.assertFalse(FileSystems.exists(path2))
def test_delete(self):
path1 = os.path.join(self.tmpdir, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
FileSystems.delete([path1])
self.assertFalse(FileSystems.exists(path1))
def test_delete_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
with self.assertRaises(BeamIOError) as error:
FileSystems.delete([path1])
self.assertTrue(
error.exception.message.startswith('Delete operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [path1])
|
|
"""
.. module:: mcmc
:synopsis: Monte Carlo procedure
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
This module defines one key function, :func:`chain`, that handles the Markov
chain. So far, the code uses only one chain, as no parallelization is done.
The following routine is also defined in this module, which is called at
every step:
* :func:`get_new_position` returns a new point in the parameter space,
depending on the proposal density.
The :func:`chain` in turn calls several helper routines, defined in
:mod:`sampler`. These are called just once:
* :func:`get_covariance_matrix() <sampler.get_covariance_matrix>`
* :func:`read_args_from_chain() <sampler.read_args_from_chain>`
* :func:`read_args_from_bestfit() <sampler.read_args_from_bestfit>`
* :func:`accept_step() <sampler.accept_step>`
Their usage is described in :mod:`sampler`. On the contrary, the following
routines are called at every step:
* :func:`compute_lkl() <sampler.compute_lkl>` is called at every step in the
Markov chain, returning the likelihood at the current point in the parameter
space.
The arguments of these functions will often contain **data** and/or **cosmo**.
They are both initialized instances of respectively :class:`data` and the
cosmological class. They will thus not be described for every function.
"""
import os
import sys
import math
import random as rd
import numpy as np
import warnings
import scipy.linalg as la
from pprint import pprint
import io_mp
import sampler
def get_new_position(data, eigv, U, k, Cholesky, Rotation):
"""
Obtain a new position in the parameter space from the eigenvalues of the
inverse covariance matrix, or from the Cholesky decomposition (original
idea by Antony Lewis, in `Efficient sampling of fast and slow
cosmological parameters <http://arxiv.org/abs/1304.4473>`_ )
The three different jumping options, decided when starting a run with the
flag **-j** are **global**, **sequential** and **fast** (by default) (see
:mod:`parser_mp` for reference).
.. warning::
For running Planck data, the option **fast** is highly recommended, as
it speeds up the convergence. Note that when using this option, the
list of your likelihoods in your parameter file **must match** the
ordering of your nuisance parameters (as always, they must come after
the cosmological parameters, but they also must be grouped by
likelihood, with, preferentially, the slowest likelihood to compute
coming first).
- **global**: varies all the parameters at the same time. Depending on the
input covariance matrix, some degeneracy direction will be followed,
otherwise every parameter will jump independently of each other.
- **sequential**: varies every parameter sequentially. Works best when
having no clue about the covariance matrix, or to understand which
estimated sigma is wrong and slowing down the whole process.
- **fast**: privileged method when running the Planck likelihood. Described
in the aforementioned article, it separates slow (cosmological) and fast
(nuisance) parameters.
Parameters
----------
eigv : numpy array
Eigenvalues previously computed
U : numpy_array
Covariance matrix.
k : int
Number of points so far in the chain, is used to rotate through
parameters
Cholesky : numpy array
Cholesky decomposition of the covariance matrix, and its inverse
Rotation : numpy_array
Not used yet
"""
parameter_names = data.get_mcmc_parameters(['varying'])
vector_new = np.zeros(len(parameter_names), 'float64')
sigmas = np.zeros(len(parameter_names), 'float64')
# Write the vector of last accepted points, or if it does not exist
# (initialization routine), take the mean value
vector = np.zeros(len(parameter_names), 'float64')
try:
for elem in parameter_names:
vector[parameter_names.index(elem)] = \
data.mcmc_parameters[elem]['last_accepted']
except KeyError:
for elem in parameter_names:
vector[parameter_names.index(elem)] = \
data.mcmc_parameters[elem]['initial'][0]
# Initialize random seed
rd.seed()
# Choice here between sequential and global change of direction
if data.jumping == 'global':
for i in range(len(vector)):
sigmas[i] = (math.sqrt(1/eigv[i]/len(vector))) * \
rd.gauss(0, 1)*data.jumping_factor
elif data.jumping == 'sequential':
i = k % len(vector)
sigmas[i] = (math.sqrt(1/eigv[i]))*rd.gauss(0, 1)*data.jumping_factor
elif data.jumping == 'fast':
#i = k % len(vector)
j = k % len(data.over_sampling_indices)
i = data.over_sampling_indices[j]
###############
# method fast+global
for index, elem in enumerate(data.block_parameters):
# When the running index is below the maximum index of a block of
# parameters, this block is varied, and **only this one** (note the
# break at the end of the if clause, it is not a continue)
if i < elem:
if index == 0:
Range = elem
Previous = 0
else:
Range = elem-data.block_parameters[index-1]
Previous = data.block_parameters[index-1]
# All the varied parameters are given a random variation with a
# sigma of 1. This will translate into a jump for all the
# parameters (as long as the Cholesky matrix is non diagonal)
for j in range(Range):
sigmas[j+Previous] = (math.sqrt(1./Range)) * \
rd.gauss(0, 1)*data.jumping_factor
break
else:
continue
else:
print('\n\n Jumping method unknown (accepted : ')
print('global, sequential, fast (default))')
# Fill in the new vector
if data.jumping in ['global', 'sequential']:
vector_new = vector + np.dot(U, sigmas)
else:
vector_new = vector + np.dot(Cholesky, sigmas)
# Check for boundaries problems
flag = 0
for i, elem in enumerate(parameter_names):
value = data.mcmc_parameters[elem]['initial']
if((str(value[1]) != str(-1) and value[1] is not None) and
(vector_new[i] < value[1])):
flag += 1 # if a boundary value is reached, increment
elif((str(value[2]) != str(-1) and value[2] is not None) and
vector_new[i] > value[2]):
flag += 1 # same
# At this point, if a boundary condition is not fulfilled, i.e. if flag is
# different from zero, return False
if flag != 0:
return False
# Check for a slow step (only after the first time, so we put the test in a
# try: statement: the first time, the exception KeyError will be raised)
try:
data.check_for_slow_step(vector_new)
except KeyError:
pass
# If it is not the case, proceed with normal computation. The value of
# new_vector is then put into the 'current' point in parameter space.
for index, elem in enumerate(parameter_names):
data.mcmc_parameters[elem]['current'] = vector_new[index]
# Propagate the information towards the cosmo arguments
data.update_cosmo_arguments()
return True
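# Illustrative example of the 'fast' block selection above (values assumed):
# with data.block_parameters = [6, 20] (6 slow cosmological plus 14 fast
# nuisance parameters), an index i < 6 draws sigmas[0:6] (the slow block),
# while 6 <= i < 20 draws sigmas[6:20] (the fast block); the Cholesky matrix
# then turns those draws into a correlated jump in parameter space.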
######################
# MCMC CHAIN
######################
def chain(cosmo, data, command_line):
"""
Run a Markov chain of fixed length with a Metropolis Hastings algorithm.
Main function of this module, this is the actual Markov chain procedure.
After having selected a starting point in parameter space defining the
first **last accepted** one, it will, for a given amount of steps :
+ choose randomly a new point following the *proposal density*,
+ compute the cosmological *observables* through the cosmological module,
+ compute the value of the *likelihoods* of the desired experiments at this
point,
+ *accept/reject* this point given its likelihood compared to the one of
the last accepted one (a minimal sketch of this acceptance rule is given
at the end of this module).
Every time the code accepts :code:`data.write_step` number of points
(quantity defined in the input parameter file), it will write the result to
disk (flushing the buffer by closing the output file and reopening it
again).
.. note::
to use the code to set a fiducial file for certain fixed parameters,
you can use two solutions. The first one is to put all input 1-sigma
proposal densities to zero (this method still works, but is not
recommended anymore). The second one consists of using the flag "-f 0",
to force a step of zero amplitude.
"""
## Initialisation
loglike = 0
# In case command_line.silent has been asked, outputs should only contain
# data.out. Otherwise, it will also contain sys.stdout
outputs = [data.out]
if not command_line.silent:
outputs.append(sys.stdout)
# check for MPI
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# suppress duplicate output from slaves
if rank:
command_line.quiet = True
except ImportError:
# set all chains to master if no MPI
rank = 0
# Recover the covariance matrix according to the input, if the varying set
# of parameters is non-zero
if (data.get_mcmc_parameters(['varying']) != []):
sigma_eig, U, C = sampler.get_covariance_matrix(cosmo, data, command_line)
if data.jumping_factor == 0:
warnings.warn(
"The jumping factor has been set to 0. The above covariance " +
"matrix will not be used.")
# In case of a fiducial run (all parameters fixed), simply run once and
# print out the likelihood. This should not be used any more (one has to
# modify the log.param, which is never a good idea). Instead, force the code
# to use a jumping factor of 0 with the option "-f 0".
else:
warnings.warn(
"You are running with no varying parameters... I will compute " +
"only one point and exit")
data.update_cosmo_arguments() # this fills in the fixed parameters
loglike = sampler.compute_lkl(cosmo, data)
io_mp.print_vector(outputs, 1, loglike, data)
return 1, loglike
# In the fast-slow method, one needs the Cholesky decomposition of the
# covariance matrix. Return the Cholesky decomposition as a lower
# triangular matrix
Cholesky = None
Rotation = None
if command_line.jumping == 'fast':
Cholesky = la.cholesky(C).T
Rotation = np.identity(len(sigma_eig))
# If the update mode was selected, the previous (or original) matrix should be stored
if command_line.update:
previous = (sigma_eig, U, C, Cholesky)
# If restart wanted, pick initial value for arguments
if command_line.restart is not None:
sampler.read_args_from_chain(data, command_line.restart)
# If restart from best fit file, read first point (overwrite settings of
# read_args_from_chain)
if command_line.bf is not None:
sampler.read_args_from_bestfit(data, command_line.bf)
# Pick a position (from the last accepted point if restarting, from the mean
# value otherwise), with at most 100 tries.
for i in range(100):
if get_new_position(data, sigma_eig, U, i,
Cholesky, Rotation) is True:
break
if i == 99:
raise io_mp.ConfigurationError(
"You should probably check your prior boundaries... because " +
"no valid starting position was found after 100 tries")
# Compute the starting Likelihood
loglike = sampler.compute_lkl(cosmo, data)
# Choose this step as the last accepted value
# (accept_step), and modify accordingly the max_loglike
sampler.accept_step(data)
max_loglike = loglike
# If the jumping factor is 0, the likelihood associated with this point is
# displayed, and the code exits.
if data.jumping_factor == 0:
io_mp.print_vector(outputs, 1, loglike, data)
return 1, loglike
acc, rej = 0.0, 0.0 # acceptance and rejection number count
N = 1 # number of time the system stayed in the current position
# define path and covmat
input_covmat = command_line.cov
base = os.path.basename(command_line.folder)
# the previous line fails when "folder" is a string ending with a slash. This issue is cured by the next lines:
if base == '':
base = os.path.basename(command_line.folder[:-1])
command_line.cov = os.path.join(
command_line.folder, base+'.covmat')
# Print on screen the computed parameters
if not command_line.silent and not command_line.quiet:
io_mp.print_parameters(sys.stdout, data)
# Suppress non-informative output after initializing
command_line.quiet = True
k = 1
# Main loop, which goes on until the requested number of steps
# (command_line.N) has been taken.
while k <= command_line.N:
# If the number of steps reaches the number set in the update method,
# then the proposal distribution should be adapted.
if command_line.update:
# master chain behavior
if not rank:
# Add the folder to the list of files to analyze, and switch on the
# options for computing only the covmat
from parser_mp import parse
info_command_line = parse(
'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian --want-covmat' % command_line.folder)
info_command_line.update = command_line.update
# the +10 below is here to ensure that the first master update will take place before the first slave updates,
# but this is a detail, the code is robust against situations where updating is not possible, so +10 could be omitted
if not (k+10) % command_line.update and k > 10:
# Try to launch an analyze
try:
from analyze import analyze
R_minus_one = analyze(info_command_line)
except:
if not command_line.silent:
print 'Step ',k,' chain ', rank,': Failed to calculate covariance matrix'
pass
if not (k-1) % command_line.update:
try:
# Read the covmat
sigma_eig, U, C = sampler.get_covariance_matrix(
cosmo, data, command_line)
if command_line.jumping == 'fast':
Cholesky = la.cholesky(C).T
# Test here whether the covariance matrix has really changed
# We should in principle test all terms, but testing the first one should suffice
if not C[0,0] == previous[2][0,0]:
previous = (sigma_eig, U, C, Cholesky)
if k == 1:
if not command_line.silent:
if not input_covmat == None:
warnings.warn(
'Appending to an existing folder: using %s instead of %s. '
'If new input covmat is desired, please delete previous covmat.'
% (command_line.cov, input_covmat))
else:
warnings.warn(
'Appending to an existing folder: using %s. '
'If no starting covmat is desired, please delete previous covmat.'
% command_line.cov)
else:
data.out.write('# After %d accepted steps: update proposal with max(R-1) = %f \n' % (int(acc), max(R_minus_one)))
if not command_line.silent:
print 'After %d accepted steps: update proposal with max(R-1) = %f \n' % (int(acc), max(R_minus_one))
try:
if stop_after_update:
k = command_line.N
print 'Covariance matrix updated - stopping run'
except:
pass
except:
pass
command_line.quiet = True
# slave chain behavior
else:
if not (k-1) % command_line.update:
try:
sigma_eig, U, C = sampler.get_covariance_matrix(
cosmo, data, command_line)
if command_line.jumping == 'fast':
Cholesky = la.cholesky(C).T
# Test here whether the covariance matrix has really changed
# We should in principle test all terms, but testing the first one should suffice
if not C[0,0] == previous[2][0,0] and not k == 1:
data.out.write('# After %d accepted steps: update proposal \n' % int(acc))
if not command_line.silent:
print 'After %d accepted steps: update proposal \n' % int(acc)
try:
if stop_after_update:
k = command_line.N
print 'Covariance matrix updated - stopping run'
except:
pass
previous = (sigma_eig, U, C, Cholesky)
except:
pass
# Pick a new position ('current' flag in mcmc_parameters), and compute
# its likelihood. If get_new_position returns True, it means it did not
# encounter any boundary problem. Otherwise, just increase the
# multiplicity of the point and start the loop again
if get_new_position(
data, sigma_eig, U, k, Cholesky, Rotation) is True:
newloglike = sampler.compute_lkl(cosmo, data)
else: # reject step
rej += 1
N += 1
k += 1
continue
# Harmless trick to avoid exponentiating large numbers. This decides
# whether or not the system should move.
if (newloglike != data.boundary_loglike):
if (newloglike >= loglike):
alpha = 1.
else:
alpha = np.exp(newloglike-loglike)
else:
alpha = -1
if ((alpha == 1.) or (rd.uniform(0, 1) < alpha)): # accept step
# Print out the last accepted step (WARNING: this is NOT the one we
# just computed ('current' flag), but really the previous one.)
# with its proper multiplicity (number of times the system stayed
# there).
io_mp.print_vector(outputs, N, loglike, data)
# Report the 'current' point to the 'last_accepted'
sampler.accept_step(data)
loglike = newloglike
if loglike > max_loglike:
max_loglike = loglike
acc += 1.0
N = 1 # Reset the multiplicity
else: # reject step
rej += 1.0
N += 1 # Increase multiplicity of last accepted point
# Regularly (option set in the parameter file), close and reopen the
# buffer to force a write to file.
if acc % data.write_step == 0:
io_mp.refresh_file(data)
# Update the outputs list
outputs[0] = data.out
k += 1 # One iteration done
# END OF WHILE LOOP
# If at this moment, the multiplicity is higher than 1, it means the
# current point is not yet accepted, but it also means that we did not print
# out the last_accepted one yet. So we do.
if N > 1:
io_mp.print_vector(outputs, N-1, loglike, data)
# Print out some information on the finished chain
rate = acc / (acc + rej)
sys.stdout.write('\n# {0} steps done, acceptance rate: {1}\n'.
format(command_line.N, rate))
# In case the acceptance rate is too low, or too high, print a warning
if rate < 0.05:
warnings.warn("The acceptance rate is below 0.05. You might want to "
"set the jumping factor to a lower value than the "
"default (2.4), with the option `-f 1.5` for instance.")
elif rate > 0.6:
warnings.warn("The acceptance rate is above 0.6, which means you might"
" have difficulties exploring the entire parameter space"
". Try analysing these chains, and use the output "
"covariance matrix to decrease the acceptance rate to a "
"value between 0.2 and 0.4 (roughly).")
# For a restart, erase the starting point to keep only the new, longer
# chain.
if command_line.restart is not None:
os.remove(command_line.restart)
sys.stdout.write(' deleting starting point of the chain {0}\n'.
format(command_line.restart))
return
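# Minimal sketch of the acceptance rule used in the main loop of chain() above
# (illustrative only, never called; both arguments are log-likelihoods, and the
# real loop additionally rejects points flagged with data.boundary_loglike):
def _example_metropolis_accept(loglike_old, loglike_new):
    if loglike_new >= loglike_old:
        return True                        # always accept an uphill move
    alpha = np.exp(loglike_new - loglike_old)
    return rd.uniform(0, 1) < alpha        # accept downhill with probability alpha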
|
|
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
    uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import time
import endpoints
import logging
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import ConferenceSession
from models import ConferenceSessionForm
from models import ConferenceSessionForms
from models import ConferenceSessionQueryForm
from models import ConferenceSessionQueryForms
from models import TeeShirtSize
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences are nearly sold out: %s')
#Set memcache variables
MEMCACHE_SESSIONS_KEY = "RECENT_SESSION_SPEAKERS"
SESSIONS_TPL = ('%s is the Speaker for these sessions: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
#Define endpoint request message classes
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
CONF_SESSION_POST_REQUEST = endpoints.ResourceContainer(
ConferenceSessionForm,
websafeConferenceKey=messages.StringField(1),
)
CONF_SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_SESSION_SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1),
)
CONF_SESSION_DATE_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
date=messages.StringField(1),
)
CONF_SESSION_TIME_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
time=messages.StringField(1),
)
CONF_SESSION_TIME_TYPE_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
time=messages.StringField(1),
type=messages.StringField(2),
)
CONF_SESSION_TYPE_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
typeOfSession=messages.StringField(2),
)
CONF_SESSION_WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
SessionKey=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE], allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID], scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference Session objects - - - - - - - - - - - - - - - - - - - -
def _copyConferenceSessionToForm(self, conf):
"""Copy relevant fields from ConferenceSession to ConferenceSessionForm."""
cf = ConferenceSessionForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
elif field.name == "duration":
setattr(cf, field.name, str(getattr(conf, field.name)))
elif field.name == "startTime":
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
cf.check_initialized()
return cf
def _createSessionObject(self, request):
"""Create ConferenceSession object, returning ConferenceSessionForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("ConferenceSession 'name' field required")
if not request.speaker:
raise endpoints.BadRequestException("ConferenceSession 'speaker' field required")
# check if conf exists given websafeConfKey
# get conference; check that it exists
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
# Check to see if user is Conference Organiser
if conf.organizerUserId != user_id:
raise endpoints.ForbiddenException('Only the conference organiser %s can add conference sessions.' % conf.organizerUserId)
# copy ConferenceSessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
# convert startDate from string to Date object
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
# convert startTime from strings to Time objects
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
# convert duration from strings to Time objects
if data['duration']:
data['duration'] = datetime.strptime(data['duration'][:5], "%H:%M").time()
# generate a Profile key from the user ID, allocate a ConferenceSession ID
# under that parent, and build the ConferenceSession key from it (the key
# layout is sketched after this method)
p_key = ndb.Key(Profile, user_id)
c_id = ConferenceSession.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(ConferenceSession, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create ConferenceSession, send email to organizer confirming
# creation of ConferenceSession & return (modified) ConferenceSessionForm
ConferenceSession(**data).put()
#taskqueue.add(params={'email': user.email(), 'conferenceSessionInfo': repr(request)}, url='/tasks/send_confirmation_session_email')
taskqueue.add(params={'websafeConferenceKey': request.websafeConferenceKey, 'speaker': data['speaker'], 'conferenceInfo': repr(request)}, url='/tasks/getFeaturedSpeaker')
return self._copyConferenceSessionToForm(request)
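# Illustrative key layout (the user id and allocated id are assumptions): a
# session created by user 'u123' ends up with the ancestor chain
#   ndb.Key(Profile, 'u123') -> ndb.Key(ConferenceSession, 42, parent=p_key)
# which is what lets getConferenceSessionsCreated() below find it with
# ConferenceSession.query(ancestor=ndb.Key(Profile, user_id)).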
@ndb.transactional()
def _updateSessionObject(self, request):
"""Update ConferenceSession object, returning ConferenceSessionForm/request."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceSessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference session
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference session exists
if not conf:
raise endpoints.NotFoundException(
'No conference session found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference session.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceSessionForm to ConferenceSession object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to ConferenceSession object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceSessionToForm(conf)
@endpoints.method(CONF_SESSION_POST_REQUEST, ConferenceSessionForm, path='conference/session/create/{websafeConferenceKey}', http_method='POST', name='createSession')
def createSession(self, request):
"""Create conference session w/provided fields & return w/updated info."""
return self._createSessionObject(request)
@endpoints.method(CONF_SESSION_POST_REQUEST, ConferenceSessionForm, path='conference/session/update/{websafeConferenceKey}', http_method='PUT', name='updateSession')
def updateSession(self, request):
"""Update conference session w/provided fields & return w/updated info."""
return self._updateSessionObject(request)
@endpoints.method(CONF_SESSION_GET_REQUEST, ConferenceSessionForms, path='conference/session/get/{websafeConferenceKey}', http_method='POST', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Return requested conference session (by websafeConferenceKey)."""
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.websafeConferenceKey == request.websafeConferenceKey) # Filter on websafeConferenceKey
return ConferenceSessionForms(items=[self._copyConferenceSessionToForm(conf1) for conf1 in qry2])
@endpoints.method(CONF_SESSION_SPEAKER_GET_REQUEST, ConferenceSessionForms, path='conference/session/bySpeaker/{speaker}', http_method='POST', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Return requested conference session (by speaker)."""
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.speaker == request.speaker) # Filter on speaker
return ConferenceSessionForms(items=[self._copyConferenceSessionToForm(conf1) for conf1 in qry2])
@endpoints.method(CONF_SESSION_DATE_GET_REQUEST, ConferenceSessionForms, path='conference/session/byDate/{date}', http_method='POST', name='getSessionsByDate')
def getSessionsByDate(self, request):
"""Return requested conference session (by date)."""
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.startDate == datetime.strptime(request.date[:10], "%Y-%m-%d").date()) # Filter on date
return ConferenceSessionForms(items=[self._copyConferenceSessionToForm(conf1) for conf1 in qry2])
@endpoints.method(CONF_SESSION_TIME_GET_REQUEST, ConferenceSessionForms, path='conference/session/byTime/{time}', http_method='POST', name='getSessionsByTime')
def getSessionsByTime(self, request):
"""Return requested conference session (by time)."""
# can't put colons in the url param of time. 12-30 works for 12:30.
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.startTime == datetime.strptime(request.time[:5], "%H-%M").time()) # Filter on time
return ConferenceSessionForms(items=[self._copyConferenceSessionToForm(conf1) for conf1 in qry2])
@endpoints.method(CONF_SESSION_TIME_TYPE_GET_REQUEST, ConferenceSessionForms, path='conference/session/byTimeAndType/{time}/{type}', http_method='POST', name='getSessionsByTimeAndType')
def getSessionsByTimeAndType(self, request):
"""Return requested conference session (by less than time and not by session type)."""
# can't put colons in the url param of time. 12-30 works for 12:30.
# Since inequality is only limited to 1 then use multiple queries.
# get a list of session types
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.typeOfSession != None) # Filter on session type
listofSessionTypes = []
# add session types to list if not equal to request.type
for conf1 in qry2:
if request.type != conf1.typeOfSession:
listofSessionTypes.append(conf1.typeOfSession)
# make list distinct
listofSessionTypes = set(listofSessionTypes)
qry3 = qry1.filter(ConferenceSession.startTime < datetime.strptime(request.time[:5], "%H-%M").time()) # Filter on time
qry4 = qry3.filter(ConferenceSession.typeOfSession.IN(listofSessionTypes)) # Filter on type of session
return ConferenceSessionForms(items=[self._copyConferenceSessionToForm(conf1) for conf1 in qry4])
@endpoints.method(CONF_SESSION_TYPE_GET_REQUEST, ConferenceSessionForms, path='conference/session/byType/{websafeConferenceKey}/{typeOfSession}', http_method='POST', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Return requested conference session (by websafeConferenceKey and type of session)."""
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.websafeConferenceKey == request.websafeConferenceKey) # Filter on websafeConferenceKey
qry3 = qry2.filter(ConferenceSession.typeOfSession == request.typeOfSession) # Filter on type of session
return ConferenceSessionForms(items=[self._copyConferenceSessionToForm(conf1) for conf1 in qry3])
@endpoints.method(message_types.VoidMessage, ConferenceSessionForms, path='getConferenceSessionsCreated', http_method='POST', name='getConferenceSessionsCreated')
def getConferenceSessionsCreated(self, request):
"""Return conference sessions created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = ConferenceSession.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceSessionForm objects per ConferenceSession
return ConferenceSessionForms(
items=[self._copyConferenceSessionToForm(conf) for conf in confs]
)
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate a Profile key from the user ID, allocate a Conference ID under
# that parent, and build the Conference key from it
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),'conferenceInfo': repr(request)}, url='/tasks/send_confirmation_email')
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference', http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm, path='conference/{websafeConferenceKey}', http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm, path='conference/{websafeConferenceKey}', http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms, path='getConferencesCreated', http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
# - - - Conference Filter objects - - - - - - - - - - - - - - - - - - -
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms, path='queryConferences', http_method='POST', name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
# - - - Conference Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm, path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm, path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Session Memcache - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _getFeaturedSpeaker(speaker, websafeConferenceKey):
# Check to see if Speaker exists in other sessions in this conference
qry1 = ConferenceSession.query()
qry2 = qry1.filter(ConferenceSession.speaker == speaker) # Filter on speaker
qry3 = qry2.filter(ConferenceSession.websafeConferenceKey == websafeConferenceKey) # Filter on websafeConferenceKey
        sessions = qry3.fetch()
        if sessions:
            # If the speaker appears in other sessions in this conference,
            # format sessionSpeakerList and set it in memcache
            sessionSpeakerList = SESSIONS_TPL % (speaker, ', '.join(sess.name for sess in sessions))
            memcache.set(MEMCACHE_SESSIONS_KEY, sessionSpeakerList)
        else:
            # If the speaker has no other sessions in this conference,
            # delete the memcache featured-speaker entry
sessionSpeakerList = ""
memcache.delete(MEMCACHE_SESSIONS_KEY)
return sessionSpeakerList
@endpoints.method(message_types.VoidMessage, StringMessage, path='conference/session/getFeaturedSpeaker', http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return Session from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_SESSIONS_KEY) or "")
# - - - Conference Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by memcache cron job & putAnnouncement(). """
confs = Conference.query(ndb.AND(Conference.seatsAvailable <= 5, Conference.seatsAvailable > 0) ).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage, path='conference/announcement/get', http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Conference Session Wishlist - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _addSessionToWishlist(self, request):
"""Add Session to Wishlist."""
retval = True
prof = self._getProfileFromUser() # get user Profile
        # get the session from its websafe key; check that it exists
        session = ndb.Key(urlsafe=request.SessionKey).get()
        if not session:
            raise endpoints.NotFoundException('No conference session found with key: %s' % request.SessionKey)
# check if user already added otherwise add
if request.SessionKey not in prof.conferenceSessionWishKeysToAttend:
prof.conferenceSessionWishKeysToAttend.append(request.SessionKey)
else:
raise endpoints.NotFoundException('Conference session with key: %s is already in your wishlist' % request.SessionKey)
# write things back to the datastore & return
prof.put()
return BooleanMessage(data=retval)
@endpoints.method(CONF_SESSION_WISHLIST_POST_REQUEST, BooleanMessage, path='conference/wishlist/{SessionKey}', http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add Session to Wishlist."""
return self._addSessionToWishlist(request)
    @endpoints.method(message_types.VoidMessage, ConferenceSessionForms, path='conferences/wishlist', http_method='GET', name='getSessionsInWishlist')
    def getSessionsInWishlist(self, request):
        """Get list of sessions in the user's wishlist."""
        prof = self._getProfileFromUser()  # get user Profile
        session_keys = [ndb.Key(urlsafe=wssk) for wssk in prof.conferenceSessionWishKeysToAttend]
        sessions = ndb.get_multi(session_keys)
        # return a ConferenceSessionForm per wishlisted session
        return ConferenceSessionForms(
            items=[self._copyConferenceSessionToForm(session) for session in sessions])
# - - - Conference Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms, path='conferences/attending', http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, BooleanMessage, path='conference/{websafeConferenceKey}', http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage, path='conference/{websafeConferenceKey}', http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
# - - - Filter Playground - - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, ConferenceForms, path='filterPlayground', http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
api = endpoints.api_server([ConferenceApi]) # register API
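# A minimal illustrative sketch (an assumption, not part of the original module): the
# queryConferences endpoint above consumes a ConferenceQueryForms message whose filters
# are translated by _formatFilters through the FIELDS/OPERATORS lookup tables (defined
# elsewhere in the project) before _getQuery turns them into ndb FilterNodes. The exact
# field and operator spellings below are illustrative only.
EXAMPLE_QUERY_FILTERS = {
    "filters": [
        {"field": "CITY", "operator": "EQ", "value": "London"},
        {"field": "MONTH", "operator": "GT", "value": "6"},
    ]
}
# Only one field may carry an inequality: adding a second filter such as
# {"field": "MAX_ATTENDEES", "operator": "LT", "value": "100"} would make
# _formatFilters raise BadRequestException, because "month" already holds the
# inequality in the example above.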
|
|
import sys
class Node(object):
def __init__(self, data=None, pred_node=None, succ_node=None):
self.data = data
self.succ_node = succ_node
self.pred_node = pred_node
def insert_as_pred(self, new_data):
node = Node(new_data, self.pred_node, self)
self.pred_node.succ_node = node
self.pred_node = node
return node
def insert_as_succ(self, new_data):
node = Node(new_data, self, self.succ_node)
self.succ_node.pred_node = node
self.succ_node = node
return node
def is_header(self):
return self.pred_node is None
def is_tailer(self):
return self.succ_node is None
def is_first(self):
return False if self.is_header() else self.pred_node.is_header()
def __str__(self):
return str(self.data)
class LinkedList(object):
def __init__(self, fromList=None):
self.header = Node(None, None, None)
self.tailer = Node(None, None, None)
self.header.succ_node = self.tailer
self.tailer.pred_node = self.header
self.__size = 0
if type(fromList) is list and len(fromList) > 0:
node = self.header.insert_as_succ(fromList[0])
self.__size = len(fromList)
for i in range(1, len(fromList)):
node = node.insert_as_succ(fromList[i])
def __str__(self):
data_list = []
node = self.header.succ_node
while node.data is not None:
data_list.append(node.data)
node = node.succ_node
return ', '.join(map(str, data_list))
def __swap(self, node_p, node_q):
p_pred = node_p.pred_node
p_succ = node_p.succ_node
q_pred = node_q.pred_node
q_succ = node_q.succ_node
# node_p and node_q are close to each other
if node_p.pred_node is node_q:
q_pred.succ_node = node_p
node_p.pred_node = q_pred
node_q.pred_node = node_p
node_p.succ_node = node_q
node_q.succ_node = p_succ
p_succ.pred_node = node_q
elif node_p.succ_node is node_q:
self.__swap(node_q, node_p)
else:
p_pred.succ_node = node_q
p_succ.pred_node = node_q
q_pred.succ_node = node_p
q_succ.pred_node = node_p
node_p.pred_node = q_pred
node_p.succ_node = q_succ
node_q.pred_node = p_pred
node_q.succ_node = p_succ
def reverse(self):
if (self.__size < 2):
return
i = self.__size
left = self.header.succ_node
right = self.tailer.pred_node
while i > 1:
left = left.succ_node
right = right.pred_node
self.__swap(left.pred_node, right.succ_node)
i -= 2
def insert_as_first(self, data):
self.__size += 1
return self.header.insert_as_succ(data)
def insert_as_last(self, data):
self.__size += 1
return self.tailer.insert_as_pred(data)
def insert_b(self, node, data):
self.__size += 1
return node.insert_as_pred(data)
def insert_a(self, node, data):
self.__size += 1
return node.insert_as_succ(data)
def size(self):
return self.__size
def deduplicate(self):
if self.__size < 2: return False
old_size = self.__size
node = self.header.succ_node
r = 0
while node.data is not None:
match = self.find(node.data, r, node)
if match is None:
r += 1
elif match.data == node.data:
self.remove(match)
node = node.succ_node
return old_size - self.__size
def uniquify(self):
old_size = self.__size
node = self.header.succ_node
while node.succ_node is not None:
if node.data == node.succ_node.data:
self.remove(node.succ_node)
else:
node = node.succ_node
        return old_size - self.__size
def search(self, data, n=None, node=None):
node = node if node is not None else self.tailer
n = n if n is not None else self.__size
while n > 0 and node.pred_node.data is not None:
if node.pred_node.data <= data:
return node.pred_node
node = node.pred_node
n -= 1
return None
def find(self, data, n=None, node=None):
node = node if node is not None else self.tailer
n = n if n is not None else self.__size
while n > 0 and node.pred_node.data is not None:
if node.pred_node.data == data:
return node.pred_node
node = node.pred_node
n -= 1
return None
def disordered(self):
if self.__size < 2: return False
node = self.header.succ_node
while node.succ_node.data is not None:
if node.succ_node.data < node.data:
return True
node = node.succ_node
return False
def remove(self, node):
node.pred_node.succ_node = node.succ_node
node.succ_node.pred_node = node.pred_node
self.__size -= 1
return node.data
def sort(self, method):
if method == 'insert_sort':
self.insert_sort()
elif method == 'selection_sort':
self.selection_sort()
elif method == 'merge_sort':
self.merge_sort(self.header.succ_node, self.__size)
else:
return
def insert_sort(self):
if self.__size < 2: return
node = self.header.succ_node
r = 0
while r < self.__size:
match = self.search(node.data, r, node)
self.insert_a(self.header if match is None else match, node.data)
self.remove(node)
node = node.succ_node
r += 1
def selection_sort(self):
if self.__size < 2:
return
def select_max(r):
max_data = -sys.maxsize - 1
max_node = None
node = self.header.succ_node
n = 0
while n < r and node.data is not None:
if node.data > max_data:
max_node = node
max_data = node.data
node = node.succ_node
n += 1
return max_node
r = self.__size
node = self.tailer.pred_node
while r > 1:
match = select_max(r)
self.__swap(node, match)
node = match.pred_node
r -= 1
    def __merge(self, node_p, n, node_q, m):
        # merge the sorted run of n nodes starting at node_p with the sorted run
        # of m nodes starting at node_q; return the first node of the merged run
        start = node_p.pred_node
        while m > 0:
            if n > 0 and node_p.data <= node_q.data:
                if node_p.succ_node is node_q:
                    break
                node_p = node_p.succ_node
                n -= 1
            else:
                self.insert_b(node_p, self.remove(node_q))
                node_q = node_q.succ_node
                m -= 1
        return start.succ_node
    def merge_sort(self, node_p, n):
        # sort the n nodes starting at node_p; return the new first node of the run
        # so recursive callers keep valid run heads after nodes have been re-linked
        if n < 2:
            return node_p
        middle = n >> 1
        node_q = node_p
        for i in range(middle):
            node_q = node_q.succ_node
        node_p = self.merge_sort(node_p, middle)
        node_q = self.merge_sort(node_q, n - middle)
        return self.__merge(node_p, middle, node_q, n - middle)
def first(self):
return self.header.succ_node
def last(self):
return self.tailer.pred_node
def empty(self):
return self.__size == 0
def set_size(self, size):
self.__size = size
def traverse(self, func):
if self.__size < 1:
return
node = self.header.succ_node
while node.data is not None:
func(node)
node = node.succ_node
def increase(self):
def addOne(node):
node.data += 1
self.traverse(addOne)
def half(self):
def halfValue(node):
node.data /= 2
self.traverse(halfValue)
def josephus(self, k):
if self.__size == 0:
return None
n = k
node = self.header.succ_node
while self.__size > 1:
while n > 1:
n -= 1
node = node.succ_node
if node.data is None:
node = self.header.succ_node
            self.remove(node)
            node = node.succ_node
            if node.data is None:
                # removed the last element before the tailer sentinel; wrap around
                node = self.header.succ_node
            n = k
return self.header.succ_node
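# A minimal usage sketch (not part of the original module): exercises the main
# LinkedList operations defined above.
if __name__ == '__main__':
    lst = LinkedList([3, 1, 4, 1, 5])
    print(lst)                  # 3, 1, 4, 1, 5
    print(lst.disordered())     # True
    lst.sort('insert_sort')
    print(lst)                  # 1, 1, 3, 4, 5
    print(lst.uniquify())       # 1  (one duplicate dropped from the sorted list)
    lst.reverse()
    print(lst)                  # 5, 4, 3, 1
    print(lst.josephus(2))      # 5  (every 2nd element removed until one survivor remains)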
|
|
# coding: utf-8
import urllib, json
from django.conf import settings
from django.shortcuts import get_object_or_404, redirect, render_to_response, render
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core.urlresolvers import resolve, reverse
from django.db.models import Count
from django.db.models.signals import post_save
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseBadRequest
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import messages
from django.dispatch import Signal
from django.core.files import File
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from annoying.decorators import render_to
from forms_builder.forms.signals import form_valid, form_invalid
from crowdataapp import models, forms
@render_to('document_set_index.html')
def document_set_index(request):
try:
document_sets = models.DocumentSet.objects.all().order_by('-created_at')
except:
document_sets = []
    return { 'document_sets': document_sets, 'header_title': _('Choose one of these projects') }
@render_to('document_set_landing.html')
def document_set_view(request, document_set):
document_set = get_object_or_404(models.DocumentSet,
slug=document_set)
return {
'document_set': document_set,
}
def form_detail(request, slug, template="forms/form_detail.html"):
form = get_object_or_404(models.DocumentSetForm, slug=slug)
request_context = RequestContext(request)
args = (form, request_context, request.POST or None)
form_for_form = forms.DocumentSetFormForForm(*args)
if request.method == 'POST':
if not form_for_form.is_valid():
form_invalid.send(sender=request, form=form_for_form)
return HttpResponseBadRequest(json.dumps(form_for_form.errors), content_type='application/json')
else:
entry = form_for_form.save()
form_valid.send(sender=request, form=form_for_form, entry=entry, document_id=request.session['document_id_for_entry'])
return HttpResponse('')
return render_to_response(template, { 'form': form }, request_context)
def show_document(request, document_set,document_id):
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
document = get_object_or_404(models.Document, id=int(document_id))
return render(request,
'show_document.html',
{
'document': document,
'document_set': document_set,
'head_html': document.document_set.head_html
})
@render_to('document_set_ranking.html')
def ranking_all(request, document_set, ranking_id):
document_set = get_object_or_404(models.DocumentSet,
slug=document_set)
ranking = get_object_or_404(models.DocumentSetRankingDefinition,
pk=ranking_id)
return {
'document_set': document_set,
'ranking': ranking,
'ranking_definition_id': ranking_id,
'page': request.GET.get('page', '1'),
'search_term': request.REQUEST.get('q'),
}
@login_required
def transcription_new(request, document_set):
doc_set = get_object_or_404(models.DocumentSet, slug=document_set)
document = None
if request.GET.get('document_id') is not None and request.user.is_staff:
document = get_object_or_404(models.Document, pk=request.GET.get('document_id'),
document_set=doc_set)
else:
candidates = doc_set.get_pending_documents().exclude(form_entries__user=request.user)
if candidates.count() == 0:
# TODO Redirect to a message page: "you've gone through all the documents in this project!"
return render_to_response('no_more_documents.html',
{ 'document_set': doc_set },
context_instance=RequestContext(request))
document = candidates.order_by('?')[0]
# save the candidate document in the session, for later use
# in signals.create_entry
request.session['document_id_for_entry'] = document.id
return render(request,
'transcription_new.html',
{
'document': document,
'head_html': document.document_set.head_html,
'pending_documents_count': doc_set.get_pending_documents_count_for_user(request.user),
'verified_documents_count': doc_set.get_verified_documents_count_for_user(request.user),
'reviewed_documents_count': doc_set.get_reviewed_documents_count_for_user(request.user)
})
def autocomplete_field(request, document_set, field_name):
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
field = document_set.form.all()[0].fields.get(slug=field_name)
q = request.REQUEST.get('q')
if q is not None:
verified_entries = models.DocumentSetFieldEntry.objects\
.order_by('value') \
.filter(field_id=field.pk, verified=True) \
.extra(
where=['unaccent(value) ilike %s'], params=["%%%s%%" % q]
) \
.prefetch_related('canonical_label')
else:
verified_entries = models.DocumentSetFieldEntry.objects\
.order_by('value') \
.filter(field_id=field.pk, verified=True)
return HttpResponse(json.dumps(map(lambda e: {'value': e.canonical_label.value if e.canonical_label is not None else e.value,
'tokens': e.value.split(' ') },
verified_entries)),
content_type='application/json')
@render_to('login_page.html')
def login(request):
next_page = request.REQUEST.get(auth.REDIRECT_FIELD_NAME, reverse('document_set_index'))
if request.user.is_authenticated():
return HttpResponseRedirect(next_page)
request.session['redirect_after_login'] = next_page
user = auth.authenticate(request=request)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect(reverse('after_login'))
else:
return { }
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse('document_set_index'))
@login_required
def after_login(request):
if 'redirect_after_login' in request.session:
redir = request.session['redirect_after_login']
del request.session['redirect_after_login']
return redirect(redir)
return redirect(reverse('document_set_index'))
@render_to('edit_profile.html')
@login_required
def edit_profile(request):
""" Profile Edit """
try:
profile = models.UserProfile.objects.get(user=request.user)
except models.UserProfile.DoesNotExist:
profile = None
if request.method == 'POST':
form = forms.UserProfileForm(data=request.POST, instance=profile)
if form.is_valid():
profile = form.save(commit=False)
profile.user = request.user
profile.save()
if 'redirect_after_login' in request.session:
redir = request.session['redirect_after_login']
del request.session['redirect_after_login']
return redirect(redir)
else:
return redirect(reverse('edit_profile'))
else:
form = forms.UserProfileForm(instance=profile)
return {
'profile_form': form
}
@render_to('shutdown.html')
def on_shutdown(request, document_set):
document_set = get_object_or_404(models.DocumentSet,slug=document_set)
return {
'document_set': document_set
}
@render_to('show_profile.html')
def user_profile(request, document_set, username):
""" Show User Profile """
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
user = get_object_or_404(models.User, username=username)
organizations = user.organization_set.all()
try:
profile = models.UserProfile.objects.get(user=user)
except models.UserProfile.DoesNotExist:
profile = models.UserProfile(user=user, name=user.get_full_name())
profile.save()
return {
'document_set' : document_set,
'profile': profile,
'full_name' : profile.user.get_full_name(),
'verified_documents_count': document_set.get_verified_documents_count_for_user(profile.user),
'verified_documents' : document_set.get_verified_documents_for_user(profile.user),
'pending_documents_count' : document_set.get_pending_documents_count_for_user(profile.user),
'pending_documents' : document_set.get_pending_documents_for_user(profile.user),
'users_ranking_list' : document_set.userboard(profile.user.pk),
'page': request.GET.get('page', '1'),
'search_term': request.REQUEST.get('search'),
'organizations': organizations
}
@render_to("users_all.html")
def users_all(request, document_set):
""" Show all ranking of Users """
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
return {
'document_set': document_set,
'users_list':document_set.leaderboard(),
'page': request.GET.get('page', '1'),
'search_term': request.REQUEST.get('search'),
}
@render_to("organizations_ranking.html")
def document_set_organizations_ranking(request, document_set):
""" Show all ranking of Organization """
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
organizations = document_set.organization_board()
return {
'document_set': document_set,
'page': request.GET.get('page', '1'),
'organizations': organizations
}
@render_to("documents_by_entry_value.html")
def documents_by_entry_value(request, document_set, field_id, canon_id, ranking_id):
""" Show all documents that have a field value in the field_id"""
canon = get_object_or_404(models.CanonicalFieldEntryLabel, pk=canon_id)
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
documents = canon.get_verified_documents_rankings(document_set, ranking_id)
return {
'entry_value': canon.value,
'field_name': models.DocumentSetFormField.objects.get(pk=field_id).label,
'documents': documents,
'document_set': document_set,
}
#/download/{{document_set.slug}}/"
def download_data(request, document_set):
""" Download all data by format"""
document_set = get_object_or_404(models.DocumentSet, slug=document_set)
f = open('/home/vozdata/crowdatastatic/data/%s.csv' % document_set.slug, 'rb')
data_file = File(f)
    response = HttpResponse(data_file, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % document_set.slug
return response
@login_required
def choose_current_organization(request):
""" Show which Organizations can be selected """
organizations = request.user.organization_set.all()
current_organization = None
try:
user_profile = request.user.get_profile()
except models.UserProfile.DoesNotExist:
user_profile = models.UserProfile(user=request.user, name=request.user.get_full_name())
user_profile.save()
if user_profile:
current_organization = user_profile.current_organization
template = 'choose_current_organization.html' if organizations.count() > 0 else 'without_organization.html'
return render_to_response(template, {
'organizations': organizations,
'current_organization': current_organization,
'organization_signup_link': settings.ORGANIZATION_SIGNUP_LINK
},
context_instance = RequestContext(request))
@render_to('organizations.html')
def organizations(request):
organizations = models.Organization.objects.all().order_by('name')
user_organizations = None
if request.user and request.user.is_authenticated():
user_organizations = request.user.organization_set.all()
return {
'organizations': organizations,
'user_organizations': user_organizations,
'organization_signup_link': settings.ORGANIZATION_SIGNUP_LINK
}
@login_required
def change_current_organization(request):
response = { 'success': False }
if request.method == 'POST':
organization_id = request.POST.get('organization_id')
if organization_id == "none":
organization_name = "none"
organization = None
else:
organization = get_object_or_404(models.Organization, id=organization_id)
organization_name = organization.name
user_profile = request.user.get_profile()
if user_profile:
user_profile.current_organization = organization
user_profile.save()
response['organization_name'] = organization_name
response['success'] = True
return HttpResponse(json.dumps(response), content_type='application/json')
@render_to('organization_profile.html')
def organization_profile(request, organization_slug):
""" Show Organization Profile """
organization = get_object_or_404(models.Organization, slug=organization_slug)
document_set_filter = request.GET.get('document_set_filter')
if document_set_filter:
try:
document_set_filter = int(document_set_filter)
document_form_entries_list = organization.documentsetformentry_set.filter(document__document_set__id = document_set_filter)
except ValueError:
document_form_entries_list = organization.documentsetformentry_set.all()
else:
document_form_entries_list = organization.documentsetformentry_set.all()
document_form_entries_list = document_form_entries_list.filter(document__document_set__published = True)
document_form_entries_paginator = Paginator(document_form_entries_list, 10)
reset_filter = request.GET.get('reset_filter')
if reset_filter:
organization_page = 1
else:
organization_page = request.GET.get('organization_page')
show_documents = request.GET.get('show_documents')
try:
document_form_entries = document_form_entries_paginator.page(organization_page)
except PageNotAnInteger:
document_form_entries = document_form_entries_paginator.page(1)
except EmptyPage:
document_form_entries = document_form_entries_paginator.page(document_form_entries_paginator.num_pages)
users_list = organization.users.all()
users_paginator = Paginator(users_list, 10)
user_page = request.GET.get('user_page')
show_users = request.GET.get('show_users')
try:
users = users_paginator.page(user_page)
except PageNotAnInteger:
users = users_paginator.page(1)
except EmptyPage:
users = users_paginator.page(users_paginator.num_pages)
already_member = None
documents_without_organization = None
if request.user and request.user.is_authenticated():
already_member = organization.users.filter(id = request.user.id).exists()
documents_without_organization = models.DocumentSetFormEntry.get_user_documents_without_organization(request.user).count()
document_sets = organization.documentsetformentry_set.filter(document__document_set__published = True).values('document__document_set__name', 'document__document_set__id').distinct()
return {
'organization' : organization,
'document_form_entries': document_form_entries,
'already_member': already_member,
'users': users,
'show_users': show_users,
'show_documents': show_documents,
'documents_without_organization': documents_without_organization,
'document_sets': document_sets,
'document_set_filter': document_set_filter
}
@login_required
def signup_organization(request):
response = { 'success': False }
if request.method == 'POST':
organization_slug = request.POST.get('organization_slug')
organization = get_object_or_404(models.Organization, slug=organization_slug)
organization.users.add(request.user)
organization.save()
response['success'] = True
return HttpResponse(json.dumps(response), content_type='application/json')
@login_required
def signout_organization(request):
user = request.user
response = { 'success': False }
if request.method == 'POST':
organization_slug = request.POST.get('organization_slug')
if organization_slug:
organization = user.organization_set.get(slug = organization_slug)
user.organization_set.remove(organization)
user_profile = user.get_profile()
if user_profile.current_organization == organization:
user_profile.current_organization = None
user_profile.save()
response['success'] = True
return HttpResponse(json.dumps(response), content_type='application/json')
@login_required
def assign_docs_to_organization(request):
user = request.user
response = { 'success': False }
if request.method == 'POST':
organization_slug = request.POST.get('organization_slug')
if organization_slug:
organization = user.organization_set.get(slug=organization_slug)
documents_without_organization = models.DocumentSetFormEntry.get_user_documents_without_organization(user)
if documents_without_organization.count() > 0:
documents_without_organization.update(organization = organization)
response['success'] = True
return HttpResponse(json.dumps(response), content_type='application/json')
def check_if_doc_set_is_private(doc_set, user):
if doc_set.is_private and not user.is_staff:
return HttpResponseRedirect(reverse('document_set_index',kwargs={}))
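# A minimal usage sketch (an assumption, not part of the original module): the helper above
# returns an HttpResponseRedirect only when access should be denied, so callers are expected
# to return its result early when it is not None. The view name below is illustrative.
def example_private_document_set_view(request, document_set):
    doc_set = get_object_or_404(models.DocumentSet, slug=document_set)
    denied = check_if_doc_set_is_private(doc_set, request.user)
    if denied is not None:
        return denied
    return render(request, 'document_set_landing.html', {'document_set': doc_set})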
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name": "test_strings_soft_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
"input_dtype":
dtypes.string
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class StringLookupLayerTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
use_dataset, expected_output,
input_dtype):
cls = string_lookup.StringLookup
expected_output_dtype = dtypes.int64
input_shape = input_data.shape
if use_dataset:
# Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# StringLookup), the concatenation fails. In real use cases, this may
# not be an issue because users are likely to pipe the preprocessing layer
# into other keras layers instead of predicting it directly. A workaround
# for these unit tests is to have the dataset only contain one batch, so
# no concatenation needs to happen with the result. For consistency with
# numpy input, we should make `predict` join differently shaped results
# together sensibly, with 0 padding.
input_data = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
input_shape[0])
vocab_data = dataset_ops.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0])
with CustomObjectScope({"StringLookup": cls}):
output_data = testing_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=input_dtype,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data)
self.assertAllClose(expected_output, output_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class StringLookupVocabularyTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_int_output_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_with_special_tokens(self):
vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_no_vocab(self):
with self.assertRaisesRegex(
ValueError, "You must set the layer's vocabulary"):
layer = string_lookup.StringLookup()
layer([["a"]])
def test_binary_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[0, 1, 1, 1, 1], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="binary")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "earth", "fire", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[0, 2, 0, 0, 2], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="count")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_sparse_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="binary", sparse=True)
res = layer(input_data)
    self.assertEqual(res.__class__.__name__, "SparseKerasTensor")
def test_get_vocab_returns_str(self):
vocab_data = ["earth", "wind", "and", "fire"]
expected_vocab = ["", "[UNK]", "earth", "wind", "and", "fire"]
layer = string_lookup.StringLookup(vocabulary=vocab_data)
layer_vocab = layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], six.text_type)
inverse_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True)
layer_vocab = inverse_layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], six.text_type)
def test_int_output_explicit_vocab_from_file(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_from_file_via_setter(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup()
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_non_unique_vocab_fails(self):
vocab_data = ["earth", "wind", "and", "fire", "fire"]
with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
_ = string_lookup.StringLookup(vocabulary=vocab_data)
def test_non_unique_vocab_from_file_fails(self):
vocab_list = ["earth", "wind", "and", "fire", "earth"]
vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
with self.assertRaisesRegex(ValueError, ".*repeated term.*earth.*"):
_ = string_lookup.StringLookup(vocabulary=vocab_path)
def test_inverse_layer(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", ""]])
input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
layer = string_lookup.StringLookup(vocabulary=vocab_data, invert=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
invert_layer = string_lookup.StringLookup(
vocabulary=vocab_data, invert=True)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_adapted_vocab(self):
adapt_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup()
layer.adapt(adapt_data)
invert_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_ragged_string_input_multi_bucket(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = ragged_factory_ops.constant([["earth", "wind", "fire"],
["fire", "and", "earth",
"ohio"]])
expected_output = [[3, 4, 6], [6, 5, 3, 2]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string, ragged=True)
layer = string_lookup.StringLookup(num_oov_indices=2)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
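# A minimal illustrative helper (an assumption, not part of the original test suite):
# shows the default index layout the expected outputs above rely on -- index 0 is the
# mask/padding token "" and index 1 is the OOV token "[UNK]", so the first real
# vocabulary term maps to index 2.
def _example_default_index_layout():
  layer = string_lookup.StringLookup(vocabulary=["earth", "wind", "and", "fire"])
  return layer.get_vocabulary()  # ["", "[UNK]", "earth", "wind", "and", "fire"]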
if __name__ == "__main__":
test.main()
|
|
#!/usr/local/sci/bin/python
#*****************************
#
# QC checks.
#
#
#************************************************************************
# SVN Info
#$Rev:: 55 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2015-02-06 16:38:46 +0000 (Fri, 06 Feb 2015) $: Date of last commit
#************************************************************************
import numpy as np
import scipy as sp
import datetime as dt
# RJHD routines
import qc_utils as utils
#*********************************************
class OddCluster:
'''
Class for odd cluster information
'''
def __init__(self, start, end, length, locations, data_mdi, last_data):
self.start = start
self.end = end
self.length = length
self.locations = locations
self.data_mdi = data_mdi
self.last_data = last_data
def __str__(self):
return "odd cluster, starting {}, ending {}, length {}".format(self.start, self.end, self.length)
__repr__ = __str__
#*********************************************
def oc_plots(station, cluster, time, start, indata, variable):
'''
Plot each odd cluster highlighted against surrounding data
:param MetVar station: station object
:param OddCluster cluster: cluster object
:param int time: timestamp
:param datetime start: start of dataseries
:param masked array indata: input data
:param string variable: variable name
:returns:
'''
import matplotlib.pyplot as plt
YLABELS = {"temperatures":"Temperature (C)", "dewpoints":"Dewpoints (C)", "slp":"SLP (hPa)", "windspeeds":"Wind Speed (m/s)"}
plot_start, plot_end = cluster.locations[0] - 10*24 , time + 10*24
if plot_start < 0 : plot_start = 0
plot_times = utils.times_hours_to_datetime(station.time.data[plot_start: plot_end], start)
plt.clf()
plt.plot(plot_times, indata[plot_start: plot_end], 'bo')
    plt.plot(plot_times[np.array(cluster.locations) - plot_start], indata[cluster.locations], 'ro')
    plt.ylim(utils.sort_ts_ylim(indata[plot_start: plot_end]))
plt.ylabel(YLABELS[variable])
plt.show()
return # oc_plots
#*********************************************
def occ_normal(cluster, obs_type, time, flags):
'''
Just a normal observation. (Not all inputs used here,
but required for consistency)
:param OddCluster cluster: cluster object
:param int obs_type: type determinator of observation
:param int time: timestamp
:param array flags: flag array
:returns:
cluster - updated cluster information
obs_type - updated observation type
'''
cluster.last_data = time
return cluster, obs_type # occ_normal
#*********************************************
def occ_in_cluster(cluster, obs_type, time, flags):
'''currently in a potential cluster. (Not all inputs used here,
but required for consistency)
:param OddCluster cluster: cluster object
:param int obs_type: type determinator of observation
:param int time: timestamp
:param array flags: flag array
:returns:
cluster - updated cluster information
obs_type - updated observation type
'''
if (cluster.length == 6) or (time - cluster.start > 24.):
'''longer than 6 hours or span over 24hrs --> not a cluster --> reset'''
mdi = cluster.data_mdi
# set all specifically
cluster.start = mdi
cluster.end = mdi
cluster.length = 0
cluster.locations = mdi
cluster.last_data = mdi
obs_type = 0
else:
'''in a cluster, less than 6hr and not over 24hr - increment '''
cluster.length += 1
cluster.locations += [time]
cluster.end = time
return cluster, obs_type # occ_in_cluster
#*********************************************
def occ_start_cluster(cluster, obs_type, time, flags):
'''
There has been a gap in the data, check if long enough to start cluster
(Not all inputs used here, but required for consistency)
:param OddCluster cluster: cluster object
:param int obs_type: type determinator of observation
:param int time: timestamp
:param array flags: flag array
:returns:
cluster - updated cluster information
obs_type - updated observation type
'''
if time - cluster.last_data >= 48:
'''If gap of 48hr, then start cluster increments '''
obs_type = 2
cluster.length += 1
cluster.start = time
cluster.end = time
cluster.locations = [time]
else:
'''Gap in data not sufficiently large '''
obs_type = 1
cluster.last_data = time
cluster.start = cluster.data_mdi
cluster.end = cluster.data_mdi
cluster.length = 0
cluster.locations = 0
return cluster, obs_type # occ_start_cluster
#*********************************************
def occ_after_cluster(cluster, obs_type, time, flags):
'''
There has been a gap in the data after a cluster;
check if long enough to mean cluster is sufficiently isolated.
If so, flag else reset
:param OddCluster cluster: cluster object
:param int obs_type: type determinator of observation
:param int time: timestamp
:param array flags: flag array
:returns:
cluster - updated cluster information
obs_type - updated observation type
'''
if time - cluster.end >= 48:
'''isolated cluster with 48hr gap either side'''
# plotting done outside of this def.
flags[cluster.locations] = 1
# as have had a 48hr gap, start a new cluster
cluster.last_data = cluster.end
cluster.start = time
cluster.end = time
cluster.locations = [time]
cluster.length = 1
obs_type = 2
elif (time - cluster.start <= 24) and (cluster.length < 6):
'''have data, but cluster is small and within thresholds --> increment'''
obs_type = 2
cluster.length += 1
cluster.locations += [time]
cluster.end = time
else:
'''actually it is now normal data, so reset'''
obs_type = 0
cluster.last_data = time
cluster.start = cluster.data_mdi
cluster.end = cluster.data_mdi
cluster.length = 0
cluster.locations = 0
return cluster, obs_type # occ_after_cluster
#*********************************************
def occ(station, variable_list, flag_col, datastart, logfile, diagnostics = False, plots = False, second = False):
'''
Check for odd clusters of data surrounded by missing
up to 6hr/24hr surrounded by at least 48 on each side
:param MetVar station: the station object
:param list variable_list: list of observational variables to process
:param list flag_col: the columns to set on the QC flag array
:param datetime datastart: dataset start time
:param file logfile: logfile to store outputs
:param bool diagnostics: do extra verbose output
:param bool plots: do plots
:param bool second: run for second time
:returns:
'''
# the four options of what to do with each observation
# the keys give values which are subroutines, and can be called
# all subroutines have to take the same set of inputs
options = {0 : occ_normal, 1 : occ_start_cluster, 2 : occ_in_cluster, 3 : occ_after_cluster}
for v,variable in enumerate(variable_list):
st_var = getattr(station, variable)
filtered_data = utils.apply_filter_flags(st_var)
var_flags = station.qc_flags[:,flag_col[v]]
prev_flag_number = 0
if second:
# count currently existing flags:
prev_flag_number = len(var_flags[var_flags != 0])
# using IDL copy as method to ensure reproducibility (initially)
oc_details = OddCluster(st_var.mdi, st_var.mdi, 0, st_var.mdi, st_var.mdi, -1)
obs_type = 1
for time in station.time.data:
if filtered_data.mask[time] == False:
                # process observation point using the handler looked up in the options dict
if plots and (obs_type == 3) and (time - oc_details.end >= 48):
# do plotting if matches flagging criteria
oc_plots(station, oc_details, time, datastart, filtered_data, variable)
oc_details, obs_type = options[obs_type](oc_details, obs_type, time, var_flags)
else:
# have missing data,
if obs_type == 2:
obs_type = 3
elif obs_type == 0:
obs_type = 1
station.qc_flags[:,flag_col[v]] = var_flags
flag_locs = np.where(station.qc_flags[:, flag_col[v]] != 0)
if plots or diagnostics:
utils.print_flagged_obs_number(logfile, "Odd Cluster", variable, len(flag_locs[0]) - prev_flag_number, noWrite = True)
else:
utils.print_flagged_obs_number(logfile, "Odd Cluster", variable, len(flag_locs[0]) - prev_flag_number)
# copy flags into attribute
st_var.flags[flag_locs] = 1
# matches 032070 temperature 26/8/2014
station = utils.append_history(station, "Isolated Odd Cluster Check")
return # occ
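#*********************************************
# A minimal usage sketch (an assumption, not part of the original module): drives the
# occ_* state handlers above directly to show how an isolated three-observation cluster,
# preceded and followed by gaps of at least 48 hours, ends up flagged. The full driver
# (occ above) also handles the missing-data transitions for states 0 and 2.
def _odd_cluster_example():
    flags = np.zeros(200)
    mdi = -999
    cluster = OddCluster(mdi, mdi, 0, mdi, mdi, -1)
    obs_type = 1
    options = {0: occ_normal, 1: occ_start_cluster, 2: occ_in_cluster, 3: occ_after_cluster}
    for time in [0, 60, 61, 62, 115]:          # hours with data; every other hour is missing
        if obs_type == 2 and time - cluster.end > 1:
            obs_type = 3                       # missing hours followed the candidate cluster
        cluster, obs_type = options[obs_type](cluster, obs_type, time, flags)
    return np.where(flags == 1)[0]             # -> array([60, 61, 62])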
#************************************************************************
if __name__ == "__main__":
print "Checking for Odd Clusters"
|
|
import json
import logging
import time
from django.conf import settings
from django.test import Client as DjangoClient
from django.utils import timezone
from ambulance.models import Ambulance, \
AmbulanceStatus
from emstrack.tests.util import point2str
from equipment.models import EquipmentItem
from hospital.models import Hospital
from login.models import Client, ClientStatus, ClientLog
from .client import MQTTTestCase, MQTTTestClient, TestMQTT
from .client import MQTTTestSubscribeClient as SubscribeClient
logger = logging.getLogger(__name__)
class TestMQTTPublish(TestMQTT, MQTTTestCase):
def test(self):
# Start client as admin
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start test client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqtt_publish_admin'
client = MQTTTestClient(broker,
check_payload=False,
debug=True)
self.is_connected(client)
# topics for ambulance, hospital and equipment data
topics = ('ambulance/{}/data'.format(self.a1.id),
'hospital/{}/data'.format(self.h1.id),
'equipment/{}/item/{}/data'.format(self.h1.equipmentholder.id,
self.e1.id))
self.is_subscribed(client)
# expect an ambulance data update
client.expect(topics[0])
# modify data in ambulance and save should trigger message
obj = Ambulance.objects.get(id=self.a1.id)
self.assertEqual(obj.status, AmbulanceStatus.UK.name)
obj.status = AmbulanceStatus.OS.name
obj.save()
# process messages
self.loop(client)
# assert change
obj = Ambulance.objects.get(id=self.a1.id)
self.assertEqual(obj.status, AmbulanceStatus.OS.name)
# expect hospital and equipment data updates
[client.expect(t) for t in topics[1:]]
# modify data in hospital and save should trigger message
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'no comments')
obj.comment = 'yet no comments'
obj.save()
# modify data in hospital_equipment and save should trigger message
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'True')
obj.value = 'False'
obj.save()
# process messages
self.loop(client)
client.wait()
# assert changes
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'yet no comments')
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'False')
# Start client as testuser1
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start test client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqtt_publish_admin'
broker['USERNAME'] = 'testuser1'
broker['PASSWORD'] = 'top_secret'
client = MQTTTestClient(broker,
check_payload=False,
debug=False)
self.is_connected(client)
# topics for hospital and equipment data
topics = ('hospital/{}/data'.format(self.h1.id),
'equipment/{}/item/{}/data'.format(self.h1.equipmentholder.id,
self.e1.id))
self.is_subscribed(client)
# expect hospital and equipment data updates
[client.expect(t) for t in topics]
# modify data in hospital and save should trigger message
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'yet no comments')
obj.comment = 'yet yet no comments'
obj.save()
# modify data in hospital_equipment and save should trigger message
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'False')
obj.value = 'True'
obj.save()
# process messages
self.loop(client)
client.wait()
# assert changes
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'yet yet no comments')
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'True')
# Start client as testuser2
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start test client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqtt_publish_admin'
broker['USERNAME'] = 'testuser2'
broker['PASSWORD'] = 'very_secret'
client = MQTTTestClient(broker,
check_payload=False,
debug=False)
self.is_connected(client)
# topics for ambulance, hospital and equipment data
topics = ('ambulance/{}/data'.format(self.a3.id),
'hospital/{}/data'.format(self.h1.id),
'equipment/{}/item/{}/data'.format(self.h1.equipmentholder.id,
self.e1.id))
self.is_subscribed(client)
# expect an ambulance data update
client.expect(topics[0])
# modify data in ambulance and save should trigger message
obj = Ambulance.objects.get(id=self.a3.id)
self.assertEqual(obj.status, AmbulanceStatus.UK.name)
obj.status = AmbulanceStatus.OS.name
obj.save()
# process messages
self.loop(client)
# assert change
obj = Ambulance.objects.get(id=self.a3.id)
self.assertEqual(obj.status, AmbulanceStatus.OS.name)
# expect hospital and equipment data updates
[client.expect(t) for t in topics[1:]]
# modify data in hospital and save should trigger message
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'yet yet no comments')
obj.comment = 'yet no comments'
obj.save()
# modify data in hospital_equipment and save should trigger message
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'True')
obj.value = 'False'
obj.save()
# process messages
self.loop(client)
client.wait()
# assert changes
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'yet no comments')
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'False')
class TestMQTTSubscribe(TestMQTT, MQTTTestCase):
def test(self):
# Start client as admin
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start subscribe client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqttclient'
subscribe_client = SubscribeClient(broker,
debug=True)
self.is_connected(subscribe_client)
self.is_subscribed(subscribe_client)
# Start test client
broker.update(settings.MQTT)
client_id = 'test_mqtt_subscribe_admin'
username = broker['USERNAME']
broker['CLIENT_ID'] = client_id
test_client = MQTTTestClient(broker,
check_payload=False,
debug=True)
self.is_connected(test_client)
# start django client
django_client = DjangoClient()
# login as admin
django_client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
# handshake ambulance and hospital
response = django_client.post('/en/api/client/',
content_type='application/json',
data=json.dumps({
'client_id': client_id,
'status': ClientStatus.O.name,
'ambulance': self.a1.id,
'hospital': self.h1.id
}),
follow=True)
self.assertEqual(response.status_code, 201)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.O.name)
self.assertEqual(clnt.ambulance, self.a1)
self.assertEqual(clnt.hospital, self.h1)
# Modify ambulance
# retrieve current ambulance status
obj = Ambulance.objects.get(id=self.a1.id)
self.assertEqual(obj.status, AmbulanceStatus.UK.name)
# expect update once
test_client.expect('ambulance/{}/data'.format(self.a1.id))
self.is_subscribed(test_client)
# publish change
test_client.publish('user/{}/client/{}/ambulance/{}/data'.format(self.u1.username,
client_id,
self.a1.id),
json.dumps({
'status': AmbulanceStatus.OS.name,
}), qos=0)
# process messages
self.loop(test_client, subscribe_client)
# verify change
obj = Ambulance.objects.get(id=self.a1.id)
self.assertEqual(obj.status, AmbulanceStatus.OS.name)
# Modify hospital
# retrieve current hospital status
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'no comments')
# expect update once
test_client.expect('hospital/{}/data'.format(self.h1.id))
self.is_subscribed(test_client)
test_client.publish('user/{}/client/{}/hospital/{}/data'.format(self.u1.username,
client_id,
self.h1.id),
json.dumps({
'comment': 'no more comments',
}), qos=0)
# process messages
self.loop(test_client, subscribe_client)
# verify change
obj = Hospital.objects.get(id=self.h1.id)
self.assertEqual(obj.comment, 'no more comments')
# Modify hospital equipment
# retrieve current equipment value
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'True')
# expect update once
test_client.expect('equipment/{}/item/{}/data'.format(self.h1.equipmentholder.id,
self.e1.id))
self.is_subscribed(test_client)
test_client.publish('user/{}/client/{}/equipment/{}/item/{}/data'.format(self.u1.username,
client_id,
self.h1.equipmentholder.id,
self.e1.id),
json.dumps({
'value': 'False',
}), qos=0)
# process messages
self.loop(test_client, subscribe_client)
# verify change
obj = EquipmentItem.objects.get(equipmentholder=self.h1.equipmentholder,
equipment=self.e1)
self.assertEqual(obj.value, 'False')
# test bulk ambulance update
# expect null client after logout
test_client.expect('ambulance/{}/data'.format(self.a1.id))
self.is_subscribed(test_client)
# handshake ambulance
response = django_client.post('/en/api/client/',
content_type='application/json',
data=json.dumps({
'client_id': client_id,
'status': ClientStatus.O.name,
'ambulance': self.a2.id,
}),
follow=True)
# result = JSONParser().parse(BytesIO(response.content))
# logger.debug(result)
self.assertEqual(response.status_code, 201)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.O.name)
self.assertEqual(clnt.ambulance, self.a2)
self.assertEqual(clnt.hospital, self.h1)
# retrieve last ambulance
obj = Ambulance.objects.get(id=self.a1.id)
self.assertEqual(hasattr(obj, 'client'), False)
# retrieve current ambulance status
obj = Ambulance.objects.get(id=self.a2.id)
self.assertEqual(obj.status, AmbulanceStatus.UK.name)
location = {'latitude': -2., 'longitude': 7.}
timestamp = timezone.now()
data = [
{
'status': AmbulanceStatus.OS.name,
},
{
'status': AmbulanceStatus.AV.name,
'location': location,
},
{
'status': AmbulanceStatus.PB.name,
'timestamp': str(timestamp)
}
]
# expect update once
test_client.expect('ambulance/{}/data'.format(self.a2.id))
self.is_subscribed(test_client)
test_client.publish('user/{}/client/{}/ambulance/{}/data'.format(self.u1.username,
client_id,
self.a2.id),
json.dumps(data), qos=0)
# process messages
self.loop(test_client, subscribe_client)
# verify change
obj = Ambulance.objects.get(id=self.a2.id)
self.assertEqual(obj.status, AmbulanceStatus.PB.name)
self.assertEqual(obj.timestamp, timestamp)
self.assertEqual(point2str(obj.location), point2str(location))
# Client handshake
test_client.publish('user/{}/client/{}/status'.format(username, client_id), ClientStatus.F.name)
# process messages
self.loop(test_client, subscribe_client)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.F.name)
self.assertEqual(clnt.ambulance, None)
self.assertEqual(clnt.hospital, None)
# wait for disconnect
test_client.wait()
subscribe_client.wait()
django_client.logout()
class TestMQTTWill(TestMQTT, MQTTTestCase):
def test(self):
# Start client as admin
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start test client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqtt_will_admin'
broker['WILL'] = {
'topic': 'user/{}/client/{}/status'.format(broker['USERNAME'],
broker['CLIENT_ID']),
'payload': ClientStatus.D.name
}
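# The broker stores this last-will message and publishes ClientStatus.D on the
# status topic only if the client disconnects uncleanly; reconnecting below
# with the same CLIENT_ID provokes exactly that.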
client = MQTTTestClient(broker,
check_payload=False,
debug=False)
self.is_connected(client)
# Publish client status
client.publish('user/{}/client/{}/status'.format(broker['USERNAME'],
broker['CLIENT_ID']),
ClientStatus.O.name,
qos=1,
retain=False)
# process messages
self.loop(client)
# reconnecting with same client-id will trigger will
client.expect('user/{}/client/{}/status'.format(broker['USERNAME'],
broker['CLIENT_ID']),
ClientStatus.D.name)
self.is_subscribed(client)
client = MQTTTestClient(broker,
check_payload=False,
debug=False)
self.is_connected(client)
# process messages
self.loop(client)
# wait for disconnect
client.wait()
class TestMQTTHandshakeDisconnect(TestMQTT, MQTTTestCase):
def test(self):
# Start client as admin
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start subscribe client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqttclient'
subscribe_client = SubscribeClient(broker,
debug=True)
self.is_connected(subscribe_client)
self.is_subscribed(subscribe_client)
# Start test client
broker.update(settings.MQTT)
client_id = 'test_mqtt_subscribe_admin'
username = broker['USERNAME']
broker['CLIENT_ID'] = client_id
broker['WILL'] = {
'topic': 'user/{}/client/{}/status'.format(username, client_id),
'payload': ClientStatus.D.name
}
test_client = MQTTTestClient(broker,
check_payload=False,
debug=True)
self.is_connected(test_client)
# Client handshake: online
test_client.publish('user/{}/client/{}/status'.format(username, client_id), ClientStatus.O.name)
# process messages
self.loop(test_client, subscribe_client)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.O.name)
# check record log
obj = ClientLog.objects.get(client=clnt)
self.assertEqual(obj.status, ClientStatus.O.name)
# Client handshake: force disconnected to trigger will
test_client.client._sock.close()
# process messages
subscribe_client.loop()
subscribe_client.loop()
time.sleep(1)
# process messages
subscribe_client.loop()
time.sleep(1)
# process messages
subscribe_client.loop()
time.sleep(1)
# wait for disconnect
subscribe_client.wait()
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.D.name)
# check record log
obj = ClientLog.objects.filter(client=clnt).order_by('updated_on')
self.assertEqual(len(obj), 2)
self.assertEqual(obj[0].status, ClientStatus.O.name)
self.assertEqual(obj[1].status, ClientStatus.D.name)
class TestMQTTHandshakeReconnect(TestMQTT, MQTTTestCase):
def test(self):
# Start client as admin
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
# Start subscribe client
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'test_mqttclient'
subscribe_client = SubscribeClient(broker,
debug=True)
self.is_connected(subscribe_client)
self.is_subscribed(subscribe_client)
# Start test client
broker.update(settings.MQTT)
client_id = 'test_mqtt_subscribe_admin'
username = broker['USERNAME']
broker['CLIENT_ID'] = client_id
broker['WILL'] = {
'topic': 'user/{}/client/{}/status'.format(username, client_id),
'payload': ClientStatus.D.name
}
test_client = MQTTTestClient(broker,
check_payload=False,
debug=True)
self.is_connected(test_client)
# Client handshake: online
test_client.publish('user/{}/client/{}/status'.format(username, client_id), ClientStatus.O.name)
# process messages
self.loop(test_client, subscribe_client)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.O.name)
# check record log
obj = ClientLog.objects.get(client=clnt)
self.assertEqual(obj.status, ClientStatus.O.name)
# reconnecting with the same client-id forces a disconnect
test_client = MQTTTestClient(broker,
check_payload=False,
debug=False)
self.is_connected(test_client)
# Client handshake: online
test_client.publish('user/{}/client/{}/status'.format(username, client_id), ClientStatus.O.name)
# process messages
self.loop(test_client, subscribe_client)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.O.name)
# check record log
obj = ClientLog.objects.filter(client=clnt).order_by('updated_on')
self.assertEqual(len(obj), 2)
self.assertEqual(obj[0].status, ClientStatus.O.name)
self.assertEqual(obj[1].status, ClientStatus.O.name)
# Client handshake: offline
test_client.publish('user/{}/client/{}/status'.format(username, client_id), ClientStatus.F.name)
# process messages
self.loop(test_client, subscribe_client)
# check record
clnt = Client.objects.get(client_id=client_id)
self.assertEqual(clnt.status, ClientStatus.F.name)
# check record log
obj = ClientLog.objects.filter(client=clnt).order_by('updated_on')
self.assertEqual(len(obj), 4)
self.assertEqual(obj[0].status, ClientStatus.O.name)
self.assertEqual(obj[1].status, ClientStatus.O.name)
self.assertEqual(obj[2].status, ClientStatus.D.name)
self.assertEqual(obj[3].status, ClientStatus.F.name)
# wait for disconnect
test_client.wait()
subscribe_client.wait()
|
|
# -*- encoding: utf-8 -*-
import os
import re
import requests
from requests.auth import HTTPBasicAuth
import xlrd
import xml.etree.ElementTree as ET
import json
import urllib2
from ftplib import FTP
from os.path import dirname
from urlparse import urlparse
from django.db import models
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.gis.gdal import DataSource
from django.core.files.base import ContentFile
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from modeltranslation.fields import TranslationField
from modeltranslation.translator import translator, NotRegistered
from paperclip.models import Attachment, attachment_upload
from geotrek.common.models import FileType
class ImportError(Exception):
pass
class GlobalImportError(ImportError):
pass
class RowImportError(ImportError):
pass
class ValueImportError(ImportError):
pass
class Parser(object):
label = None
filename = None
url = None
simplify_tolerance = 0 # meters
update_only = False
delete = False
duplicate_eid_allowed = False
warn_on_missing_fields = False
warn_on_missing_objects = False
separator = '+'
eid = None
fields = None
m2m_fields = {}
constant_fields = {}
m2m_constant_fields = {}
non_fields = {}
natural_keys = {}
field_options = {}
def __init__(self, progress_cb=None):
self.warnings = {}
self.line = 0
self.nb_success = 0
self.nb_created = 0
self.nb_updated = 0
self.nb_unmodified = 0
self.progress_cb = progress_cb
try:
mto = translator.get_options_for_model(self.model)
except NotRegistered:
self.translated_fields = []
else:
self.translated_fields = mto.fields.keys()
if self.fields is None:
self.fields = {
f.name: force_text(f.verbose_name)
for f in self.model._meta.fields
if not isinstance(f, TranslationField)
}
self.m2m_fields = {
f.name: force_text(f.verbose_name)
for f in self.model._meta.many_to_many
}
def normalize_field_name(self, name):
return name.upper()
def normalize_src(self, src):
if hasattr(src, '__iter__'):
return [self.normalize_field_name(subsrc) for subsrc in src]
else:
return self.normalize_field_name(src)
def add_warning(self, msg):
key = _(u"Line {line}".format(line=self.line))
warnings = self.warnings.setdefault(key, [])
warnings.append(msg)
def get_val(self, row, dst, src):
if hasattr(src, '__iter__'):
val = []
for subsrc in src:
try:
val.append(self.get_val(row, dst, subsrc))
except ValueImportError as warning:
if self.warn_on_missing_fields:
self.add_warning(unicode(warning))
val.append(None)
else:
val = row
for part in src.split('.'):
try:
if part.isdigit():
val = val[int(part)]
else:
val = val[part]
except (KeyError, IndexError):
required = u"required " if self.field_options.get(dst, {}).get('required', False) else ""
raise ValueImportError(_(u"Missing {required}field '{src}'").format(required=required, src=src))
return val
def apply_filter(self, dst, src, val):
field = self.model._meta.get_field_by_name(dst)[0]
if (isinstance(field, models.ForeignKey) or isinstance(field, models.ManyToManyField)):
if dst not in self.natural_keys:
raise ValueImportError(_(u"Destination field '{dst}' not in natural keys configuration").format(dst=dst))
to = field.rel.to
natural_key = self.natural_keys[dst]
kwargs = self.field_options.get(dst, {})
if isinstance(field, models.ForeignKey):
val = self.filter_fk(src, val, to, natural_key, **kwargs)
else:
val = self.filter_m2m(src, val, to, natural_key, **kwargs)
return val
def parse_non_field(self, dst, src, val):
"""Returns True if modified"""
if hasattr(self, 'save_{0}'.format(dst)):
return getattr(self, 'save_{0}'.format(dst))(src, val)
def set_value(self, dst, src, val):
field = self.model._meta.get_field_by_name(dst)[0]
if val is None and not field.null:
if field.blank and (isinstance(field, models.CharField) or isinstance(field, models.TextField)):
val = u""
else:
raise RowImportError(_(u"Null value not allowed for field '{src}'".format(src=src)))
if val == u"" and not field.blank:
raise RowImportError(_(u"Blank value not allowed for field '{src}'".format(src=src)))
setattr(self.obj, dst, val)
def parse_field(self, dst, src, val):
"""Returns True if modified"""
if hasattr(self, 'filter_{0}'.format(dst)):
try:
val = getattr(self, 'filter_{0}'.format(dst))(src, val)
except ValueImportError as warning:
self.add_warning(unicode(warning))
return False
else:
try:
val = self.apply_filter(dst, src, val)
except ValueImportError as warning:
self.add_warning(unicode(warning))
return False
if hasattr(self.obj, dst):
if dst in self.m2m_fields or dst in self.m2m_constant_fields:
old = set(getattr(self.obj, dst).all())
val = set(val)
else:
old = getattr(self.obj, dst)
if isinstance(old, float) and isinstance(val, float):
old = round(old, 10)
val = round(val, 10)
if old != val:
self.set_value(dst, src, val)
return True
else:
return False
else:
self.set_value(dst, src, val)
return True
def parse_fields(self, row, fields, non_field=False):
updated = []
for dst, src in fields.items():
if dst in self.constant_fields or dst in self.m2m_constant_fields:
val = src
else:
src = self.normalize_src(src)
try:
val = self.get_val(row, dst, src)
except ValueImportError as warning:
if self.field_options.get(dst, {}).get('required', False):
raise RowImportError(warning)
if self.warn_on_missing_fields:
self.add_warning(unicode(warning))
continue
if non_field:
modified = self.parse_non_field(dst, src, val)
else:
modified = self.parse_field(dst, src, val)
if modified:
updated.append(dst)
if dst in self.translated_fields:
lang = translation.get_language()
updated.append('{field}_{lang}'.format(field=dst, lang=lang))
return updated
def parse_obj(self, row, operation):
try:
update_fields = self.parse_fields(row, self.fields)
update_fields += self.parse_fields(row, self.constant_fields)
except RowImportError as warnings:
self.add_warning(unicode(warnings))
return
if operation == u"created":
self.obj.save()
else:
self.obj.save(update_fields=update_fields)
update_fields += self.parse_fields(row, self.m2m_fields)
update_fields += self.parse_fields(row, self.m2m_constant_fields)
update_fields += self.parse_fields(row, self.non_fields, non_field=True)
if operation == u"created":
self.nb_created += 1
elif update_fields:
self.nb_updated += 1
else:
self.nb_unmodified += 1
def get_eid_kwargs(self, row):
try:
eid_src = self.fields[self.eid]
except KeyError:
raise GlobalImportError(_(u"Eid field '{eid_dst}' missing in parser configuration").format(eid_dst=self.eid))
eid_src = self.normalize_field_name(eid_src)
try:
eid_val = self.get_val(row, self.eid, eid_src)
except KeyError:
raise GlobalImportError(_(u"Missing id field '{eid_src}'").format(eid_src=eid_src))
if hasattr(self, 'filter_{0}'.format(self.eid)):
eid_val = getattr(self, 'filter_{0}'.format(self.eid))(eid_src, eid_val)
self.eid_src = eid_src
self.eid_val = eid_val
return {self.eid: eid_val}
def parse_row(self, row):
self.eid_val = None
self.line += 1
if self.eid is None:
eid_kwargs = {}
objects = self.model.objects.none()
else:
try:
eid_kwargs = self.get_eid_kwargs(row)
except RowImportError as warnings:
self.add_warning(unicode(warnings))
return
objects = self.model.objects.filter(**eid_kwargs)
if len(objects) == 0 and self.update_only:
if self.warn_on_missing_objects:
self.add_warning(_(u"Bad value '{eid_val}' for field '{eid_src}'. No trek with this identifier").format(eid_val=self.eid_val, eid_src=self.eid_src))
return
elif len(objects) == 0:
objects = [self.model(**eid_kwargs)]
operation = u"created"
elif len(objects) >= 2 and not self.duplicate_eid_allowed:
self.add_warning(_(u"Bad value '{eid_val}' for field '{eid_src}'. Multiple treks with this identifier").format(eid_val=self.eid_val, eid_src=self.eid_src))
return
else:
operation = u"updated"
for self.obj in objects:
self.parse_obj(row, operation)
self.to_delete.discard(self.obj.pk)
self.nb_success += 1 # FIXME
if self.progress_cb:
self.progress_cb(float(self.line) / self.nb, self.line, self.eid_val)
def report(self, output_format='txt'):
context = {
'nb_success': self.nb_success,
'nb_lines': self.line,
'nb_created': self.nb_created,
'nb_updated': self.nb_updated,
'nb_deleted': len(self.to_delete) if self.delete else 0,
'nb_unmodified': self.nb_unmodified,
'warnings': self.warnings,
}
return render_to_string('common/parser_report.{output_format}'.format(output_format=output_format), context)
def get_mapping(self, src, val, mapping, partial):
if partial:
found = False
for i, j in mapping.iteritems():
if i in val:
val = j
found = True
break
if not found:
self.add_warning(_(u"Bad value '{val}' for field {src}. Should contain {values}").format(val=val, src=src, separator=self.separator, values=', '.join(mapping.keys())))
return None
else:
if mapping is not None:
if val not in mapping.keys():
self.add_warning(_(u"Bad value '{val}' for field {src}. Should be {values}").format(val=val, src=src, separator=self.separator, values=', '.join(mapping.keys())))
return None
val = mapping[val]
return val
def filter_fk(self, src, val, model, field, mapping=None, partial=False, create=False):
val = self.get_mapping(src, val, mapping, partial)
if val is None:
return None
if create:
val, created = model.objects.get_or_create(**{field: val})
if created:
self.add_warning(_(u"{model} '{val}' did not exist in Geotrek-Admin and was automatically created").format(model=model._meta.verbose_name.title(), val=val))
return val
try:
return model.objects.get(**{field: val})
except model.DoesNotExist:
self.add_warning(_(u"{model} '{val}' does not exists in Geotrek-Admin. Please add it").format(model=model._meta.verbose_name.title(), val=val))
return None
def filter_m2m(self, src, val, model, field, mapping=None, partial=False, create=False):
if not val:
return []
val = val.split(self.separator)
dst = []
for subval in val:
subval = subval.strip()
subval = self.get_mapping(src, subval, mapping, partial)
if subval is None:
continue
if create:
subval, created = model.objects.get_or_create(**{field: subval})
if created:
self.add_warning(_(u"{model} '{val}' did not exist in Geotrek-Admin and was automatically created").format(model=model._meta.verbose_name.title(), val=subval))
dst.append(subval)
continue
try:
dst.append(model.objects.get(**{field: subval}))
except model.DoesNotExist:
self.add_warning(_(u"{model} '{val}' does not exists in Geotrek-Admin. Please add it").format(model=model._meta.verbose_name.title(), val=subval))
continue
return dst
def start(self):
self.to_delete = set(self.model.objects.values_list('pk', flat=True))
def end(self):
if self.delete:
self.model.objects.filter(pk__in=self.to_delete).delete()
def parse(self, filename=None, limit=None):
if filename:
self.filename = filename
if not self.url and not self.filename:
raise GlobalImportError(_(u"Filename is required"))
if self.filename and not os.path.exists(self.filename):
raise GlobalImportError(_(u"File does not exists at: {filename}").format(filename=self.filename))
self.start()
for i, row in enumerate(self.next_row()):
if limit and i >= limit:
break
try:
self.parse_row(row)
except Exception as e:
self.add_warning(unicode(e))
if settings.DEBUG:
raise
self.end()
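# A minimal, illustrative subclass (the City model and the column names are
# hypothetical; only the Parser attributes shown are real):
#
#     class CityParser(ExcelParser):
#         model = City
#         eid = 'code'                # model field used to match existing rows
#         fields = {
#             'code': 'CODE',         # model field <- column header in the file
#             'name': 'NAME',
#         }
#
# CityParser().parse('cities.xls') would then create or update one City per
# row, collecting any messages in parser.warnings / parser.report().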
class ShapeParser(Parser):
encoding = 'utf-8'
def next_row(self):
datasource = DataSource(self.filename, encoding=self.encoding)
layer = datasource[0]
self.nb = len(layer)
for i, feature in enumerate(layer):
row = {self.normalize_field_name(field.name): field.value for field in feature}
try:
ogrgeom = feature.geom
except:
print _(u"Invalid geometry pointer"), i
geom = None
else:
ogrgeom.coord_dim = 2 # Flatten to 2D
geom = ogrgeom.geos
if self.simplify_tolerance and geom is not None:
geom = geom.simplify(self.simplify_tolerance)
row[self.normalize_field_name('geom')] = geom
yield row
def normalize_field_name(self, name):
"""Shapefile field names length is 10 char max"""
name = super(ShapeParser, self).normalize_field_name(name)
return name[:10]
class ExcelParser(Parser):
def next_row(self):
workbook = xlrd.open_workbook(self.filename)
sheet = workbook.sheet_by_index(0)
header = [self.normalize_field_name(cell.value) for cell in sheet.row(0)]
self.nb = sheet.nrows - 1
for i in range(1, sheet.nrows):
values = [cell.value for cell in sheet.row(i)]
row = dict(zip(header, values))
yield row
class AtomParser(Parser):
ns = {
'Atom': 'http://www.w3.org/2005/Atom',
'georss': 'http://www.georss.org/georss',
}
def flatten_fields(self, fields):
return reduce(lambda x, y: x + (list(y) if hasattr(y, '__iter__') else [y]), fields.values(), [])
def next_row(self):
srcs = self.flatten_fields(self.fields)
srcs += self.flatten_fields(self.m2m_fields)
srcs += self.flatten_fields(self.non_fields)
tree = ET.parse(self.filename)
entries = tree.getroot().findall('Atom:entry', self.ns)
self.nb = len(entries)
for entry in entries:
row = {self.normalize_field_name(src): entry.find(src, self.ns).text for src in srcs}
yield row
class AttachmentParserMixin(object):
base_url = ''
delete_attachments = False
filetype_name = u"Photographie"
non_fields = {
'attachments': _(u"Attachments"),
}
def start(self):
super(AttachmentParserMixin, self).start()
try:
self.filetype = FileType.objects.get(type=self.filetype_name)
except FileType.DoesNotExist:
raise GlobalImportError(_(u"FileType '{name}' does not exists in Geotrek-Admin. Please add it").format(name=self.filetype_name))
self.creator, created = get_user_model().objects.get_or_create(username='import', defaults={'is_active': False})
self.attachments_to_delete = {obj.pk: set(Attachment.objects.attachments_for_object(obj)) for obj in self.model.objects.all()}
def end(self):
if self.delete_attachments:
for atts in self.attachments_to_delete.itervalues():
for att in atts:
att.delete()
super(AttachmentParserMixin, self).end()
def filter_attachments(self, src, val):
if not val:
return []
return [(subval.strip(), '', '') for subval in val.split(self.separator)]
def has_size_changed(self, url, attachment):
try:
parsed_url = urlparse(url)
if parsed_url.scheme == 'ftp':
directory = dirname(parsed_url.path)
ftp = FTP(parsed_url.hostname)
ftp.login(user=parsed_url.username, passwd=parsed_url.password)
ftp.cwd(directory)
size = ftp.size(parsed_url.path.split('/')[-1:][0])
return size != attachment.attachment_file.size
if parsed_url.scheme == 'http' or parsed_url.scheme == 'https':
http = urllib2.urlopen(url)
size = http.headers.getheader('content-length')
return int(size) != attachment.attachment_file.size
except:
return False
return True
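# has_size_changed is used below to decide whether an attachment that already
# exists locally must be re-downloaded: same upload name and unchanged remote
# size means the file is kept as-is.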
def save_attachments(self, src, val):
updated = False
for url, legend, author in self.filter_attachments(src, val):
url = self.base_url + url
legend = legend or u""
author = author or u""
name = os.path.basename(url)
found = False
for attachment in self.attachments_to_delete.get(self.obj.pk, set()):
upload_name, ext = os.path.splitext(attachment_upload(attachment, name))
existing_name = attachment.attachment_file.name
if re.search(ur"^{name}(_\d+)?{ext}$".format(name=upload_name, ext=ext), existing_name) and not self.has_size_changed(url, attachment):
found = True
self.attachments_to_delete[self.obj.pk].remove(attachment)
if author != attachment.author or legend != attachment.legend:
attachment.author = author
attachment.legend = legend
attachment.save()
updated = True
break
if found:
continue
if url[:6] == 'ftp://':
try:
response = urllib2.urlopen(url)
except:
self.add_warning(_(u"Failed to download '{url}'").format(url=url))
continue
content = response.read()
else:
response = requests.get(url)
if response.status_code != requests.codes.ok:
self.add_warning(_(u"Failed to download '{url}'").format(url=url))
continue
content = response.content
f = ContentFile(content)
attachment = Attachment()
attachment.content_object = self.obj
attachment.attachment_file.save(name, f, save=False)
attachment.filetype = self.filetype
attachment.creator = self.creator
attachment.author = author
attachment.legend = legend
attachment.save()
updated = True
return updated
class TourInSoftParser(AttachmentParserMixin, Parser):
@property
def items(self):
return self.root['d']['results']
def next_row(self):
skip = 0
while True:
params = {
'$format': 'json',
'$inlinecount': 'allpages',
'$top': 1000,
'$skip': skip,
}
response = requests.get(self.url, params=params)
if response.status_code != 200:
raise GlobalImportError(_(u"Failed to download {url}. HTTP status code {status_code}").format(url=self.url, status_code=response.status_code))
self.root = response.json()
self.nb = int(self.root['d']['__count'])
for row in self.items:
yield {self.normalize_field_name(src): val for src, val in row.iteritems()}
skip += 1000
if skip >= self.nb:
return
def filter_attachments(self, src, val):
if not val:
return []
return [subval.split('||') for subval in val.split('##') if subval.split('||') != ['', '', '']]
class TourismSystemParser(AttachmentParserMixin, Parser):
@property
def items(self):
return self.root['data']
def next_row(self):
size = 1000
skip = 0
while True:
params = {
'size': size,
'start': skip,
}
response = requests.get(self.url, params=params, auth=HTTPBasicAuth(self.user, self.password))
if response.status_code != 200:
raise GlobalImportError(_(u"Failed to download {url}. HTTP status code {status_code}").format(url=self.url, status_code=response.status_code))
self.root = response.json()
self.nb = int(self.root['metadata']['total'])
for row in self.items:
yield {self.normalize_field_name(src): val for src, val in row.iteritems()}
skip += size
if skip >= self.nb:
return
def filter_attachments(self, src, val):
result = []
for subval in val or []:
try:
name = subval['name']['fr']
except KeyError:
name = None
result.append((subval['URL'], name, None))
return result
def normalize_field_name(self, name):
return name
class SitraParser(AttachmentParserMixin, Parser):
url = 'http://api.sitra-tourisme.com/api/v002/recherche/list-objets-touristiques/'
@property
def items(self):
return self.root['objetsTouristiques']
def next_row(self):
size = 100
skip = 0
while True:
params = {
'apiKey': self.api_key,
'projetId': self.project_id,
'selectionIds': [self.selection_id],
'count': size,
'first': skip,
}
response = requests.get(self.url, params={'query': json.dumps(params)})
if response.status_code != 200:
raise GlobalImportError(_(u"Failed to download {url}. HTTP status code {status_code}").format(url=self.url, status_code=response.status_code))
self.root = response.json()
self.nb = int(self.root['numFound'])
for row in self.items:
yield row
skip += size
if skip >= self.nb:
return
def filter_attachments(self, src, val):
result = []
for subval in val or []:
if 'nom' in subval:
name = subval['nom']['libelleFr']
else:
name = None
result.append((subval['traductionFichiers'][0]['url'], name, None))
return result
def normalize_field_name(self, name):
return name
class OpenSystemParser(Parser):
url = 'http://proxy-xml.open-system.fr/rest.aspx'
def next_row(self):
params = {
'Login': self.login,
'Pass': self.password,
'Action': 'concentrateur_liaisons',
}
response = requests.get(self.url, params=params)
if response.status_code != 200:
raise GlobalImportError(_(u"Failed to download {url}. HTTP status code {status_code}").format(url=self.url, status_code=response.status_code))
self.root = ET.fromstring(response.content).find('Resultat').find('Objets')
self.nb = len(self.root)
for row in self.root:
id_sitra = row.find('ObjetCle').find('Cle').text
for liaison in row.find('Liaisons'):
yield {
'id_sitra': id_sitra,
'id_opensystem': liaison.find('ObjetOS').find('CodeUI').text,
}
def normalize_field_name(self, name):
return name
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 21:09:10 2013
@author: matt
# based on MyPlayer3_Callback (which is newer than MyPlayer3.py)
"""
from __future__ import division
import time, math, logging
import numpy as np
from threading import Lock, Thread
import itertools
# not sure I've added correct path in launchd.conf
# and export doesn't obviously work
import sys
sys.path.append('/Users/matt/Dropbox/personal/dev/PythonLibs/')
try:
from uidecorators import ui_decorators
use_ui = True
except ImportError:
# a bit nasty. We'll create an object where all members
# return a decorator function returning a decorator that does nothing!
class FakeUIDec:
def __getattr__(self, name):
def no_wrap(*args, **kwargs):
def wrap_creator(func):
def w(*args, **kwargs):
func(*args, **kwargs)
return w
return wrap_creator
return no_wrap
ui_decorators = FakeUIDec()
use_ui=False
try:
import pyaudio
p = pyaudio.PyAudio()
has_pyaudio = True
except ImportError:
logging.warn("PyAudio not found! - Will not be able to output any audio!")
has_pyaudio = False
def play_waveform(w):
def callback(in_data, frame_count, time_info, status):
# this requests up to 1024 frames?
with w.datalock:
ndata = w.data
if ndata is not None:
return (np.hstack([ndata]*(frame_count//1024)), pyaudio.paContinue)
else:
return (None, pyaudio.paComplete)
if has_pyaudio:
# open stream using callback (3)
play_waveform.stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=w.rate,
output=True,
frames_per_buffer=w.size,
stream_callback=callback)
play_waveform.stream = None
max_frequency = 22100 # we stop making notes above this
note_types = {
"PureTone": lambda harmonic: 1 if harmonic==0 else 0,
"Poisson0.5": lambda harmonic: poisson(0.5, harmonic),
"Poisson1": lambda harmonic: poisson(1, harmonic),
"Poisson2": lambda harmonic: poisson(2, harmonic),
"Poisson3": lambda harmonic: poisson(3, harmonic),
"Lorentz1": lambda harmonic: 1.0/(1.0+harmonic**2),
"Lorentz10": lambda harmonic: 10.0/(10.0+harmonic**2),
"Equal": lambda harmonic: 1,
"EqualOdd": lambda harmonic: 1 if harmonic%2==1 or harmonic==0 else 0,
"EqualEven": lambda harmonic: 1 if harmonic%2==0 else 0,
"OneOverX": lambda harmonic: 1.0/(harmonic+1.0)
}
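# Each entry maps a harmonic index (0 = fundamental) to its relative
# amplitude, e.g. "PureTone" keeps only the fundamental while "EqualOdd"
# keeps the fundamental plus the odd harmonics at equal weight.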
equal_temperament_notes = [2 ** (x / 12.0) for x in range(12)]
just_intonation_notes = [1, 16 / 15., 9 / 8., 6 / 5., 5 / 4., 4 / 3., 45 / 32., 3 / 2., 8 / 5., 5 / 3., 16 / 9., 15 / 8.]
twelve_tone_names = ["I", "IIb", "II", "IIIb", "III", "IV", "IV#", "V", "VIb", "VI", "VIIb", "VII"]
class Waveform(object):
def __init__(self, size=1024*16, rate=44100):
self.size = size
self.rate = rate
self.data = np.zeros((size), dtype=np.int16)
self.datalock = Lock()
self.volume_amp = 0.1
self.form = lambda note: poisson(2, note)
self.notetype="Poisson1"
self.notefreq=440
self.on_notes_changed=[]
self._harmonics_slice = None
self.clear_notes()
def clear_notes(self):
self.notes = []
self()
def set_notes(self, notes):
self.clear_notes()
self.add_notes(notes)
self()
def add_notes(self, notes):
self.notes.append(list(notes))
self()
def __call__(self):
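# Build the signal in the frequency domain: each note is written into one
# FFT bin (index freq*size/rate) and mirrored with its complex conjugate so
# the transform comes out real; the buffer is then FFT'd, normalised and
# cast to int16 samples.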
newdata = np.zeros((self.size), dtype=np.complex64)
for notegroup in self.notes:
for freq, mag in notegroup:
dphase=int (freq*self.size / self.rate )
logging.info("Adding note at pixel %s", dphase)
if dphase > len(newdata)/2:
continue # this is nyquist, can't go any higher
#let's scale mag by number of notes
newdata[dphase]=self.volume_amp*mag*32765/2
#make ft real
newdata[-dphase] = np.conj(newdata[dphase])
sqrtsumsq = math.sqrt((newdata**2).sum())
if sqrtsumsq:
newdata *= self.volume_amp * 2.0 * 32767.0 / sqrtsumsq
printimag = 0
if printimag:
complex_d=np.imag(np.fft.fft(newdata));
print "imag magnitude: ", np.sqrt(np.sum(complex_d**2))
newdata = np.asarray(np.real(np.fft.fft(newdata)), dtype=np.int16)
with self.datalock:
self.data = newdata
for f in self.on_notes_changed:
f()
def get_volume(self):
v = math.log(self.volume_amp, 10)*20
return v
@ui_decorators.slider(getfunc=get_volume, maximum=0, minimum=-50, scale=1)
def volume(self, value):
self.volume_amp = 10**(value/20.0)
self()
def get_note_type(self):
return self.notetype
@ui_decorators.combobox(
getfunc=get_note_type,
options=note_types.keys())
def note_type(self, t):
self.notetype = t
def get_harmonics_slice(self):
if self._harmonics_slice:
return ",".join(self._harmonics_slice)
else:
return ""
@ui_decorators.textbox(getfunc=get_harmonics_slice)
def harmonics_slice(self, n):
"""
Sets the harmonics to display
Should be either [start:]stop[:step]
or else a,b,c where a,b,c are indices to choose
"""
if n=="":
self._harmonics_slice = None
return
if ':' in n:
sc = [int(x or "0") for x in n.split(":")]
if len(sc)==1:
self._harmonics_slice = (None, sc[0], None)
elif len(sc) == 2:
self._harmonics_slice = (sc[0], sc[1], None)
else:
self._harmonics_slice = (sc[0], sc[1], sc[2])
else:
self._harmonics_slice = [int(x or "-1") for x in n.split(',')]
def get_root_frequency(self):
return self.notefreq
@ui_decorators.textbox(getfunc=get_root_frequency)
def root_frequency(self, val):
self.notefreq = float(val)
self()
def add_form(self, root):
if isinstance(self._harmonics_slice, list):
all_notes = list(notes_from_func(note_types[self.notetype], root))
notes = []
for i in self._harmonics_slice:
notes.append(all_notes[i])
else:
slice_args = self._harmonics_slice or (None,)
notes = itertools.islice(
notes_from_func(note_types[self.notetype], root),
*slice_args)
self.add_notes(notes)
@ui_decorators.button
def clear(self):
self.clear_notes()
@ui_decorators.button
def note_root(self):
self.add_form(self.notefreq)
self()
@ui_decorators.button
def note_major3rd(self):
self.add_form(self.notefreq*5.0/4.0)
self()
@ui_decorators.button
def note_fifth(self):
self.add_form(self.notefreq*6.0/4.0)
self()
@ui_decorators.button
def play_major_chord(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*5.0/4.0,
self.notefreq*6.0/4.0])
@ui_decorators.button
def test(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*7.0/8.0,
self.notefreq*6.0/4.0])
@ui_decorators.button
def play_minor_chord(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*12.0/10.0,
self.notefreq*15.0/10.0])
@ui_decorators.button
def play_minor_chord_fifth(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*4.0/3.0,
self.notefreq*8.0/5.0])
def play_threaded_chord(self, roots):
def run_through():
for i,n in enumerate(roots):
self.clear_notes()
[self.add_form([]) for t in range(i)]
self.add_form(n)
time.sleep(1.5)
self.clear_notes()
for n in roots:
self.add_form(n)
Thread(target=run_through).start()
# run in interactive shell and use set_notes to play?
def poisson(l, n):
return math.exp(-l)*l**n/math.factorial(n)
def notes_from_func(func, root):
for h in itertools.count():
mag = func(h)
# we cut off until we reach max_frequency
if root+root*h > max_frequency:
return
yield root+root*h, mag
def cleanup():
if has_pyaudio:
play_waveform.stream.close()
p.terminate()
######################## UI Stuff ############################
# this could go in a separate file, but keeping it here for the
# moment
# creating a UI Options class for modifying the visualisation using
# our qt decorators
class UIOptions:
def __init__(self):
self._linear_freq_in_octaves = True
self.virtual_size = 1500,1500
self._inverse = True
self._show_just_notes = True
self._show_base_spiral = True
self._show_ET_notes = False # ET=equal temperament
def get_linear_freq_in_octaves(self):
return self._linear_freq_in_octaves
@ui_decorators.checkbox(getfunc=get_linear_freq_in_octaves)
def linear_freq_in_octaves(self, newval):
self._linear_freq_in_octaves = newval
notes_changed()
def get_show_base_spiral(self):
return self._show_base_spiral
@ui_decorators.checkbox(getfunc=get_show_base_spiral)
def show_base_spiral(self, newval):
self._show_base_spiral = newval
notes_changed()
def get_inverse(self):
return self._inverse
@ui_decorators.checkbox(getfunc=get_inverse)
def inverse(self, newval):
self._inverse = newval
notes_changed()
def get_show_just_notes(self):
return self._show_just_notes
@ui_decorators.checkbox(getfunc=get_show_just_notes)
def show_just_notes(self, newval):
self._show_just_notes = newval
notes_changed()
def get_show_ET_notes(self):
return self._show_ET_notes
@ui_decorators.checkbox(getfunc=get_show_ET_notes)
def show_ET_notes(self, newval):
self._show_ET_notes = newval
notes_changed()
def make_note_lines(root, named_notes, width, radius):
"""
For the dictionary named_notes, draws thin lines for each note
adding the key for the note to the SVG.
This way we can overlay scales on the diagrams.
"""
lines = []
for name, freq in named_notes.iteritems():
(x1, y1), theta = get_pos_theta_for_note(freq, root, 0, 0)
font_size = radius/16.0
lines.append(
'<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x1 + 2 * radius * math.sin(theta),
y1=y1, y2=y1 - 2 * radius * math.cos(theta),
width=width))
lines.append('<text x="{x}" y="{y}" font-size="{fs}">{text}</text>'.format(
x=x1 + radius * math.sin(theta),
y=y1 - radius * math.cos(theta),
text=name, fs=font_size))
return "\n".join(lines)
def get_pos_theta_for_note(f, root, root_radius, length):
"""
Return (x,y),theta where (x,y) is the starting position of the note
and theta is the angle the note should have
"""
# first, we calculate the octave and theta for the root
logf = math.log(f / root, 2)
note_fraction, octave = math.modf(logf)
if ui_opts.get_linear_freq_in_octaves():
note = (2**note_fraction - 1)
else:
note = note_fraction
theta = note * 2.0 * math.pi
centerx, centery = (x / 2 for x in ui_opts.virtual_size)
r = root_radius + (octave + note_fraction) * length
x = centerx + r * math.sin(theta)
y = centery - r * math.cos(theta)
return (x,y), theta
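# Worked example for get_pos_theta_for_note: with f = 2*root, logf = 1.0, so
# note_fraction = 0 and octave = 1; the note sits at the same angle as the
# root but one ring (length) further out, which is how octaves line up
# radially on the spiral.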
def make_spiral_lines_from_notes(root, notes,
length=75, root_radius=100,
stroke_width_scale=15):
"""
Is there a way to represent notes where octaves are still separated but
we can see notes of the same pitch?
We could draw a spiral, where an octave is 360 degrees and on the next
ring out.
There's a similar idea here:
http://nastechservices.com/Spectrograms.html
How should we represent a 3:2 ratio? If we just take log(x,2)*2*pi
then 3/2 is at 210deg (or 3.67rad). Is it worth making the scale linear,
and putting 3/2 at 180deg? We could also spiral so that 3/2f gets us to 180
deg then we stretch out the remaining part of the curve?
We'll try the linear for now.
It works, but not all 3/2 notes are 180deg from each other
(if the higher note is past the root, it's not)
Is there a way to do this? Maybe not, eg we make 5th = 3r/2 opposite root
and 3/2r = 9/4 != root and yet root still needs to be 180deg from it
"""
width_gamma = 0.2 # stroke width scales as magnitude**width_gamma
centerx, centery = (x / 2 for x in ui_opts.virtual_size)
lines = []
for f, m in notes:
# we split the note into octave and note (0 - 1)
width = stroke_width_scale * math.pow(m, width_gamma)
(x1, y1), theta = get_pos_theta_for_note(f, root, root_radius, length)
x2 = x1 + 0.9 * length * math.sin(theta)
y2 = y1 - 0.9 * length * math.cos(theta)
lines.append('<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x2, y1=y1, y2=y2,
width=width))
return "\n".join(lines)
def make_spiral_octave_lines(root, length=75, root_radius=100, max_f=22100):
"""
Starting with the root note, draw the spiral on which
any higher frequency notes will sit. This way we can count
harmonics more easily
"""
width = 0.5
(x1, y1), _ = get_pos_theta_for_note(root, root, root_radius, length)
lines = []
step = int(root/50) or 1
for f in range(int(root), int(max_f), step):
(x2, y2), theta = get_pos_theta_for_note(f, root, root_radius, length)
lines.append('<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x2, y1=y1, y2=y2,
width=width))
x1, y1 = x2, y2
return "\n".join(lines)
rgb_colors = [0xFF0000, 0x00FF00, 0x0000FF]
cym_colors = [0x00FFFF, 0xFF00FF, 0xFFFF00]
white = 0xFFFFFFFF
black = 0xFF000000
# some QT specific stuff follows:
import PySide.QtCore
import PySide.QtGui
import PySide.QtSvg
def render_svg(svg, qp):
r = PySide.QtSvg.QSvgRenderer()
w,h = ui_opts.virtual_size
ret = '<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{}" height="{}">'.format(w, h)
ret += svg
ret += "</svg>"
# print ret
r.load(PySide.QtCore.QByteArray(ret))
assert r.isValid()
r.render(qp)
def raw_svg_to_group(svg, color, extras=""):
ret = '<g stroke="#{0:06X}" fill="#{0:06X}" {1}>'.format(
color & 0xFFFFFF, extras)
ret += svg
ret += "</g>"
return ret
from uidecorators.qt_framework import Framework
def notes_changed(*args):
mode = "inverse" if ui_opts.get_inverse() else "normal"
qim = PySide.QtGui.QImage(d.widget().width(), d.widget().height(), PySide.QtGui.QImage.Format.Format_ARGB32)
qp = PySide.QtGui.QPainter(qim)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.SmoothPixmapTransform)
if mode == "inverse":
#qim.fill(white)
qp.setCompositionMode(qp.CompositionMode.CompositionMode_Darken)
colors = cym_colors
default_foreground = black
default_background = white
mode = "darken"
else:
#qim.fill(black)
qp.setCompositionMode(qp.CompositionMode.CompositionMode_Lighten)
colors = rgb_colors
default_foreground = white
default_background = black
mode = "lighten"
default_foreground = 0x888888
root = w.get_root_frequency()
all_svgs=[]
num_octaves = math.log(max_frequency / root, 2)
# let's scale note height and width with number of octaves we're drawing
note_length = 400.0 / num_octaves
note_width = 500 / 2**num_octaves
# we'll set the background with an svg rect
svg = raw_svg_to_group('<rect width="1500" height="1500" />', default_background)
all_svgs.append(svg)
for check, notes in [(ui_opts.get_show_just_notes, just_intonation_notes),
(ui_opts.get_show_ET_notes, equal_temperament_notes)]:
if check():
overlay = make_note_lines(
root,
{i: f * root for i, f in zip(twelve_tone_names, notes)},
0.5, 600)
svg = raw_svg_to_group(overlay, default_foreground)
all_svgs.append(svg)
if ui_opts.get_show_base_spiral():
overlay = make_spiral_octave_lines(root, length=note_length)
svg = raw_svg_to_group(overlay, default_foreground)
all_svgs.append(svg)
theta = 0
width, height = ui_opts.virtual_size
for notegroup, col in zip(w.notes, colors):
notegrp_svg = make_spiral_lines_from_notes(
root, notegroup, length=note_length, stroke_width_scale=note_width)
notegrp_svg += '<circle r="{}" cx="{}" cy="{}"/>'.format(
width / 30.0, width / 10.0 + width / 45.0 * math.sin(theta),
width / 10.0 + width / 45.0 * math.cos(theta))
theta += math.pi*2.0/len(w.notes)
# convert to a svg group with some extra tags to make inkscape happy
svg = raw_svg_to_group(
notegrp_svg, col,
extras='inkscape:groupmode="layer" filter="url(#blend)"')
all_svgs.append(svg)
# finally we'll render them all
for svg in all_svgs:
render_svg(svg, qp)
# try to save an inkscape compatible svg file.
# we can add a darken/lighten filter, and we need to add
# enable-background="new" to the svg header and the
# inkscape ns:
with open("out.svg", 'w') as f:
f.write('<svg xmlns="http://www.w3.org/2000/svg" '
'xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" '
'version="1.1" width="{}" height="{}" '
'enable-background="new">'.format(width, height))
f.write('<filter id="blend">'
'<feBlend in2="BackgroundImage" mode="{0}" />'
'</filter>'.format(mode))
f.write("\n".join(all_svgs))
f.write("</svg>")
d.widget().setPixmap(PySide.QtGui.QPixmap.fromImage(qim))
# qim.save("out.png", 'PNG')
qp = None # we have to make sure qim is deleted before QPainter?
if __name__=="__main__":
w=Waveform()
play_waveform(w)
if use_ui:
ui_opts = UIOptions()
f = Framework()
f.get_main_window().resize(800,600)
d=PySide.QtGui.QDockWidget("Note Visualization")
d.setWidget(PySide.QtGui.QLabel())
f.get_main_window().addDockWidget(PySide.QtCore.Qt.RightDockWidgetArea, d)
# play notes is threaded, so we need to call notes_changed from the
# ui thread.
w.on_notes_changed.append(lambda: f.run_on_ui_thread(notes_changed))
f.display_widgets([f.get_obj_widget(w), f.get_obj_widget(ui_opts)])
f.close()
|
|
import os
from keen.client import KeenClient
from keen.exceptions import InvalidEnvironmentError
__author__ = 'dkador'
_client = None
project_id = None
write_key = None
read_key = None
master_key = None
base_url = None
def _initialize_client_from_environment():
''' Initialize a KeenClient instance using environment variables. '''
global _client, project_id, write_key, read_key, master_key, base_url
if _client is None:
# check environment for project ID and keys
project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
read_key = read_key or os.environ.get("KEEN_READ_KEY")
master_key = master_key or os.environ.get("KEEN_MASTER_KEY")
base_url = base_url or os.environ.get("KEEN_BASE_URL")
if not project_id:
raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
_client = KeenClient(project_id,
write_key=write_key,
read_key=read_key,
master_key=master_key,
base_url=base_url)
def add_event(event_collection, body, timestamp=None):
""" Adds an event.
Depending on the persistence strategy of the client,
this will either result in the event being uploaded to Keen
immediately or will result in saving the event to some local cache.
:param event_collection: the name of the collection to insert the
event to
:param body: dict, the body of the event to insert
:param timestamp: datetime, optional, the timestamp of the event
"""
_initialize_client_from_environment()
_client.add_event(event_collection, body, timestamp=timestamp)
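# Example usage (sketch; assumes this module is importable as `keen` and that
# KEEN_PROJECT_ID / KEEN_WRITE_KEY are set in the environment):
#   import keen
#   keen.add_event("sign_ups", {"username": "lloyd", "referred_by": "harry"})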
def add_events(events):
""" Adds a batch of events.
Depending on the persistence strategy of the client,
this will either result in the event being uploaded to Keen
immediately or will result in saving the event to some local cache.
:param events: dictionary of events
"""
_initialize_client_from_environment()
_client.add_events(events)
def generate_image_beacon(event_collection, body, timestamp=None):
""" Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param body: dict, the body of the event to insert
:param timestamp: datetime, optional, the timestamp of the event
"""
_initialize_client_from_environment()
return _client.generate_image_beacon(event_collection, body, timestamp=timestamp)
def count(event_collection, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, max_age=None):
""" Performs a count query
Counts the number of events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.count(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by, max_age=max_age)
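# Example query (sketch; "pageviews" is a hypothetical collection):
#   keen.count("pageviews", timeframe="previous_7_days", group_by="browser")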
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a sum query
Adds the values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.sum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def minimum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a minimum query
Finds the minimum value of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.minimum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def maximum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a maximum query
Finds the maximum value of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.maximum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def average(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a average query
Finds the average of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.average(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def median(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a median query
Finds the median of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.median(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def percentile(event_collection, target_property, percentile, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a percentile query
Finds the percentile of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param percentile: float, the specific percentile you wish to calculate,
supporting 0-100 with two decimal places of precision, for example 99.99
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.percentile(
event_collection=event_collection,
timeframe=timeframe,
percentile=percentile,
timezone=timezone,
interval=interval,
filters=filters,
group_by=group_by,
target_property=target_property,
max_age=max_age,
)
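# Illustrative usage sketch (not part of the original module): a 99th-percentile
# query over a hypothetical "purchases" collection and price property.
#
#   p99_price = percentile("purchases", "purchase.price", 99,
#                          timeframe="previous_30_days")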
def count_unique(event_collection, target_property, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a count unique query
Counts the unique values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.count_unique(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def select_unique(event_collection, target_property, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a select unique query
Returns an array of the unique values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.select_unique(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def extraction(event_collection, timeframe=None, timezone=None, filters=None, latest=None, email=None,
property_names=None):
""" Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional, an email address to send the results to
:param property_names: string or list of strings, used to limit the properties returned
"""
_initialize_client_from_environment()
return _client.extraction(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
filters=filters, latest=latest, email=email, property_names=property_names)
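# Illustrative usage sketch (not part of the original module); the collection,
# timeframe, and property names are hypothetical:
#
#   recent_events = extraction("purchases", timeframe="previous_24_hours",
#                              latest=100,
#                              property_names=["purchase.price", "device"])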
def funnel(*args, **kwargs):
""" Performs a Funnel query
Returns an object containing the results for each step of the funnel.
:param steps: array of dictionaries, one for each step. example:
[{"event_collection":"signup","actor_property":"user.id"},
{"event_collection":"purchase","actor_property:"user.id"}]
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.funnel(*args, **kwargs)
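# Illustrative usage sketch (not part of the original module); the step
# definitions and collections are hypothetical:
#
#   signup_to_purchase = funnel(
#       steps=[{"event_collection": "signup", "actor_property": "user.id"},
#              {"event_collection": "purchase", "actor_property": "user.id"}],
#       timeframe="previous_7_days",
#   )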
def multi_analysis(event_collection, analyses, timeframe=None, interval=None,
timezone=None, filters=None, group_by=None, max_age=None):
""" Performs a multi-analysis query
Returns a dictionary of analysis results.
:param event_collection: string, the name of the collection to query
:param analyses: dict, the types of analyses you'd like to run. example:
{"total money made":{"analysis_type":"sum","target_property":"purchase.price"},
"average price":{"analysis_type":"average","target_property":"purchase.price"}}
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: int, greater than 30, the maximum 'staleness' (in seconds) you're
willing to trade for increased query performance
"""
_initialize_client_from_environment()
return _client.multi_analysis(event_collection=event_collection, timeframe=timeframe,
interval=interval, timezone=timezone, filters=filters,
group_by=group_by, analyses=analyses, max_age=max_age)
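# Illustrative usage sketch (not part of the original module); the collection,
# analysis names, and properties are hypothetical:
#
#   results = multi_analysis(
#       "purchases",
#       analyses={
#           "total money made": {"analysis_type": "sum",
#                                "target_property": "purchase.price"},
#           "average price": {"analysis_type": "average",
#                             "target_property": "purchase.price"},
#       },
#       timeframe="previous_7_days",
#   )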
def delete_events(*args, **kwargs):
""" Performs a delete for events.
Returns true upon success.
:param event_collection: string, the event collection from which events are being deleted
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
"""
_initialize_client_from_environment()
return _client.delete_events(*args, **kwargs)
def get_collection(*args, **kwargs):
""" Returns event collection schema
:param event_collection: string, the event collection whose schema is to be returned;
if left blank, the schema for all collections will be returned
"""
_initialize_client_from_environment()
return _client.get_collection(*args, **kwargs)
def get_all_collections():
""" Returns event collection schema for all events
"""
_initialize_client_from_environment()
return _client.get_all_collections()
|
|
"""Unit tests of cataloging managers."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.type.objects import TypeList as abc_type_list
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def cataloging_profile_class_fixture(request):
request.cls.service_config = request.param
request.cls.mgr = Runtime().get_service_manager(
'CATALOGING',
proxy=PROXY,
implementation=request.cls.service_config)
@pytest.fixture(scope="function")
def cataloging_profile_test_fixture(request):
pass
@pytest.mark.usefixtures("cataloging_profile_class_fixture", "cataloging_profile_test_fixture")
class TestCatalogingProfile(object):
"""Tests for CatalogingProfile"""
def test_supports_catalog_lookup(self):
"""Tests supports_catalog_lookup"""
assert isinstance(self.mgr.supports_catalog_lookup(), bool)
def test_supports_catalog_query(self):
"""Tests supports_catalog_query"""
assert isinstance(self.mgr.supports_catalog_query(), bool)
def test_supports_catalog_admin(self):
"""Tests supports_catalog_admin"""
assert isinstance(self.mgr.supports_catalog_admin(), bool)
def test_supports_catalog_hierarchy(self):
"""Tests supports_catalog_hierarchy"""
assert isinstance(self.mgr.supports_catalog_hierarchy(), bool)
def test_supports_catalog_hierarchy_design(self):
"""Tests supports_catalog_hierarchy_design"""
assert isinstance(self.mgr.supports_catalog_hierarchy_design(), bool)
def test_get_catalog_record_types(self):
"""Tests get_catalog_record_types"""
assert isinstance(self.mgr.get_catalog_record_types(), abc_type_list)
def test_get_catalog_search_record_types(self):
"""Tests get_catalog_search_record_types"""
assert isinstance(self.mgr.get_catalog_search_record_types(), abc_type_list)
class NotificationReceiver(object):
# Implemented from resource.ResourceManager
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def cataloging_manager_class_fixture(request):
# Implemented from resource.ResourceManager
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'CATALOGING',
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_catalog_form_for_create([])
create_form.display_name = 'Test Catalog'
create_form.description = 'Test Catalog for cataloging manager tests'
catalog = request.cls.svc_mgr.create_catalog(create_form)
request.cls.catalog_id = catalog.get_id()
request.cls.receiver = NotificationReceiver()
else:
request.cls.catalog_id = Id('resource.Resource%3A000000000000000000000000%40DLKIT.MIT.EDU')
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_catalog(request.cls.catalog_id)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def cataloging_manager_test_fixture(request):
# Implemented from resource.ResourceManager
pass
@pytest.mark.usefixtures("cataloging_manager_class_fixture", "cataloging_manager_test_fixture")
class TestCatalogingManager(object):
"""Tests for CatalogingManager"""
def test_get_catalog_lookup_session(self):
"""Tests get_catalog_lookup_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_lookup():
self.svc_mgr.get_catalog_lookup_session()
def test_get_catalog_query_session(self):
"""Tests get_catalog_query_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_query():
self.svc_mgr.get_catalog_query_session()
def test_get_catalog_admin_session(self):
"""Tests get_catalog_admin_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_admin():
self.svc_mgr.get_catalog_admin_session()
def test_get_catalog_hierarchy_session(self):
"""Tests get_catalog_hierarchy_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_hierarchy():
self.svc_mgr.get_catalog_hierarchy_session()
def test_get_catalog_hierarchy_design_session(self):
"""Tests get_catalog_hierarchy_design_session"""
# From tests_templates/resource.py::ResourceManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_hierarchy_design():
self.svc_mgr.get_catalog_hierarchy_design_session()
def test_get_cataloging_rules_manager(self):
"""Tests get_cataloging_rules_manager"""
# From tests_templates/resource.py::ResourceManager::get_resource_batch_manager_template
if self.svc_mgr.supports_cataloging_rules():
self.svc_mgr.get_cataloging_rules_manager()
class NotificationReceiver(object):
# Implemented from resource.ResourceProxyManager
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def cataloging_proxy_manager_class_fixture(request):
# Implemented from resource.ResourceProxyManager
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'CATALOGING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_catalog_form_for_create([])
create_form.display_name = 'Test Catalog'
create_form.description = 'Test Catalog for cataloging proxy manager tests'
catalog = request.cls.svc_mgr.create_catalog(create_form)
request.cls.catalog_id = catalog.get_id()
else:
request.cls.catalog_id = Id('resource.Resource%3A000000000000000000000000%40DLKIT.MIT.EDU')
request.cls.receiver = NotificationReceiver()
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_catalog(request.cls.catalog_id)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def cataloging_proxy_manager_test_fixture(request):
# Implemented from resource.ResourceProxyManager
pass
@pytest.mark.usefixtures("cataloging_proxy_manager_class_fixture", "cataloging_proxy_manager_test_fixture")
class TestCatalogingProxyManager(object):
"""Tests for CatalogingProxyManager"""
def test_get_catalog_lookup_session(self):
"""Tests get_catalog_lookup_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_lookup():
self.svc_mgr.get_catalog_lookup_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_catalog_lookup_session()
def test_get_catalog_query_session(self):
"""Tests get_catalog_query_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_query():
self.svc_mgr.get_catalog_query_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_catalog_query_session()
def test_get_catalog_admin_session(self):
"""Tests get_catalog_admin_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_admin():
self.svc_mgr.get_catalog_admin_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_catalog_admin_session()
def test_get_catalog_hierarchy_session(self):
"""Tests get_catalog_hierarchy_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_hierarchy():
self.svc_mgr.get_catalog_hierarchy_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_catalog_hierarchy_session()
def test_get_catalog_hierarchy_design_session(self):
"""Tests get_catalog_hierarchy_design_session"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_admin_session_template
if self.svc_mgr.supports_catalog_hierarchy_design():
self.svc_mgr.get_catalog_hierarchy_design_session(PROXY)
with pytest.raises(errors.NullArgument):
self.svc_mgr.get_catalog_hierarchy_design_session()
def test_get_cataloging_rules_proxy_manager(self):
"""Tests get_cataloging_rules_proxy_manager"""
# From tests_templates/resource.py::ResourceProxyManager::get_resource_batch_proxy_manager_template
if self.svc_mgr.supports_cataloging_rules():
self.svc_mgr.get_cataloging_rules_proxy_manager()
|
|
import datetime
import itertools
import unittest
import tkp.db
from tkp.db import associations as dbass
from tkp.db import general as dbgen
from tkp.db import nulldetections as dbnd
from tkp.db.orm import DataSet
from tkp.testutil import db_subs
from tkp.testutil.decorators import requires_database
@requires_database()
class TestForcedFit(unittest.TestCase):
"""
These tests will check whether null detections are picked up across bands
"""
def shortDescription(self):
"""http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/"""
return None
def tearDown(self):
tkp.db.rollback()
def test_nullDetection(self):
data = {'description': "null detection:" + self._testMethodName}
dataset = DataSet(data=data)
# Three timesteps, each with 4 bands -> 12 images.
taustart_tss = [datetime.datetime(2013, 8, 1),
datetime.datetime(2013, 9, 1),
datetime.datetime(2013, 10, 1)]
freq_effs = [124, 149, 156, 185]
freq_effs = [f * 1e6 for f in freq_effs]
im_params = db_subs.generate_timespaced_dbimages_data(len(freq_effs)
* len(taustart_tss))
timestamps = itertools.repeat(taustart_tss, len(freq_effs))
for im, freq, ts in zip(im_params, itertools.cycle(freq_effs),
itertools.chain.from_iterable(zip(*timestamps))):
im['freq_eff'] = freq
im['taustart_ts'] = ts
images = []
for im in im_params:
image = tkp.db.Image(dataset=dataset, data=im)
images.append(image)
# Arbitrary parameters, except that they fall inside our image.
src0 = db_subs.example_extractedsource_tuple(ra=122.5, dec=9.5)
src1 = db_subs.example_extractedsource_tuple(ra=123.5, dec=10.5)
# Group images in blocks of 4, corresponding to all frequency bands at
# a given timestep.
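# (zip(*(iter(images),) * n) is the standard "grouper" idiom: the same
# iterator is consumed n times per output tuple, so the loop below walks
# over consecutive, non-overlapping chunks of len(freq_effs) images.)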
for images in zip(*(iter(images),) * len(freq_effs)):
for image in images:
# The first source is only seen at timestep 0, band 0.
# The second source is only seen at timestep 1, band 3.
if (image.taustart_ts == taustart_tss[0] and
image.freq_eff == freq_effs[0]):
dbgen.insert_extracted_sources(image.id, [src0], 'blind')
elif (image.taustart_ts == taustart_tss[1] and
image.freq_eff == freq_effs[3]):
dbgen.insert_extracted_sources(image.id, [src1], 'blind')
else:
pass
for image in images:
dbass.associate_extracted_sources(image.id, deRuiter_r=5.68,
new_source_sigma_margin=3)
nd_ids_pos = dbnd.get_nulldetections(image.id)
# The null_detections are the positional inputs for the forced
# fits, which in turn return additional parameters,
# e.g. from src0, src1
if image.taustart_ts == taustart_tss[0]:
# There are no null detections at the first timestep
self.assertEqual(len(nd_ids_pos), 0)
elif image.taustart_ts == taustart_tss[1]:
# src0 is a null detection at the second timestep
self.assertEqual(len(nd_ids_pos), 1)
dbgen.insert_extracted_sources(image.id, [src0], 'ff_nd',
ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
else:
# All other images have two null detections.
self.assertEqual(len(nd_ids_pos), 2)
dbgen.insert_extracted_sources(image.id, [src0, src1],
'ff_nd',
ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
# And here we have to associate the null detections with the
# runcat sources...
dbnd.associate_nd(image.id)
query = """\
SELECT id
,datapoints
FROM runningcatalog r
WHERE dataset = %(dataset_id)s
ORDER BY datapoints
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# We should have two runningcatalog sources, with a datapoint for
# every image in which the sources were seen.
self.assertEqual(len(result), 2)
query = """\
SELECT r.id
,rf.band
,rf.f_datapoints
FROM runningcatalog r
,runningcatalog_flux rf
WHERE r.dataset = %(dataset_id)s
AND rf.runcat = r.id
ORDER BY r.id
,rf.band
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# We should have eight runningcatalog_flux entries,
# one for every source in every band, i.e. 2 x 4.
# The number of flux datapoints differ per source, though
self.assertEqual(len(result), 8)
# Source 1: inserted into timestep 0, band 0.
# Force-fits in band 0 images at next timesteps,
# so 1+2 for band 0.
self.assertEqual(result[0][2], 3)
# Source 1: inserted into timestep 0, band 0.
# Force-fits in bands 1,2,3 images at next timesteps.
# so 0+2 for bands 1,2,3.
self.assertEqual(result[1][2], 2)
self.assertEqual(result[2][2], 2)
self.assertEqual(result[3][2], 2)
# Source 2: inserted into timestep 1, band 3.
# Force-fits in band 0,1,2 images at next timestep,
# so 1 for band 0,1,2
self.assertEqual(result[4][2], 1)
self.assertEqual(result[5][2], 1)
self.assertEqual(result[6][2], 1)
# Source 2: inserted into timestep 1, band 3.
# Force-fit in band 3 image at next timestep,
# so 1+1 for band 3
self.assertEqual(result[7][2], 2)
# We should also have two lightcurves for both sources,
# where source 1 has 3 datapoints in band0 (t1,t2,t3)
# and 2 datapoints for the other three bands (t2,t3).
# Source 2 has two datapoints for band3 (t2,t3) and
# one for the other three bands (t3).
query = """\
SELECT a.runcat
,a.xtrsrc
,a.type
,i.band
,i.taustart_ts
FROM assocxtrsource a
,extractedsource x
,image i
WHERE a.xtrsrc = x.id
AND x.image = i.id
AND i.dataset = %(dataset_id)s
ORDER BY a.runcat
,i.band
,i.taustart_ts
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# 9 + 5 entries for source 1 and 2 resp.
self.assertEqual(len(result), 14)
# The individual light-curve datapoints
# Source1: new at t1, band0
self.assertEqual(result[0][2], 4)
self.assertEqual(result[0][4], taustart_tss[0])
# Source1: Forced fit at t2, same band
self.assertEqual(result[1][2], 7)
self.assertEqual(result[1][3], result[0][3])
self.assertEqual(result[1][4], taustart_tss[1])
# Source1: Forced fit at t3, same band
self.assertEqual(result[2][2], 7)
self.assertEqual(result[2][3], result[1][3])
self.assertEqual(result[2][4], taustart_tss[2])
# Source1: Forced fit at t2, band1
self.assertEqual(result[3][2], 7)
self.assertTrue(result[3][3] > result[2][3])
self.assertEqual(result[3][4], taustart_tss[1])
# Source1: Forced fit at t3, band1
self.assertEqual(result[4][2], 7)
self.assertEqual(result[4][3], result[3][3])
self.assertEqual(result[4][4], taustart_tss[2])
# Source1: Forced fit at t2, band2
self.assertEqual(result[5][2], 7)
self.assertTrue(result[5][3] > result[4][3])
self.assertEqual(result[5][4], taustart_tss[1])
# Source1: Forced fit at t3, band2
self.assertEqual(result[6][2], 7)
self.assertEqual(result[6][3], result[5][3])
self.assertEqual(result[6][4], taustart_tss[2])
# Source1: Forced fit at t2, band3
self.assertEqual(result[7][2], 7)
self.assertTrue(result[7][3] > result[6][3])
self.assertEqual(result[7][4], taustart_tss[1])
# Source1: Forced fit at t3, band3
self.assertEqual(result[8][2], 7)
self.assertEqual(result[8][3], result[7][3])
self.assertEqual(result[8][4], taustart_tss[2])
# Source2: Forced fit at t3, band0
self.assertEqual(result[9][2], 7)
self.assertEqual(result[9][3], result[0][3])
self.assertEqual(result[9][4], taustart_tss[2])
# Source2: Forced fit at t3, band1
self.assertEqual(result[10][2], 7)
self.assertTrue(result[10][3] > result[9][3])
self.assertEqual(result[10][4], taustart_tss[2])
# Source2: Forced fit at t3, band2
self.assertEqual(result[11][2], 7)
self.assertTrue(result[11][3] > result[10][3])
self.assertEqual(result[11][4], taustart_tss[2])
# Source2: new at t2, band3
self.assertEqual(result[12][2], 4)
self.assertTrue(result[12][3] > result[11][3])
self.assertEqual(result[12][4], taustart_tss[1])
# Source2: Forced fit at t3, band3
self.assertEqual(result[13][2], 7)
self.assertEqual(result[13][3], result[12][3])
self.assertEqual(result[13][4], taustart_tss[2])
def test_1to1_nullDetection(self):
"""
This tests that the two sources are associated if they were
detected at different timesteps. The positions are used in
the next test as well.
"""
data = {'description': "null detection:" + self._testMethodName}
dataset = DataSet(data=data)
# Two timesteps, just 1 band -> 2 images.
taustart_tss = [datetime.datetime(2013, 8, 1),
datetime.datetime(2013, 9, 1)]
freq_effs = [124]
freq_effs = [f * 1e6 for f in freq_effs]
im_params = db_subs.generate_timespaced_dbimages_data(len(freq_effs)
* len(taustart_tss))
timestamps = itertools.repeat(taustart_tss, len(freq_effs))
for im, freq, ts in zip(im_params, itertools.cycle(freq_effs),
itertools.chain.from_iterable(zip(*timestamps))):
im['freq_eff'] = freq
im['taustart_ts'] = ts
images = []
for im in im_params:
image = tkp.db.Image(dataset=dataset, data=im)
images.append(image)
# Arbitrary parameters, except that they fall inside our image
# and close together (see next test)
src0 = db_subs.example_extractedsource_tuple(ra=122.985, dec=10.5)
src1 = db_subs.example_extractedsource_tuple(ra=123.015, dec=10.5)
# Group images per timestep (blocks of len(freq_effs); here a single band).
for images in zip(*(iter(images),) * len(freq_effs)):
for image in images:
# The sources are only seen at timestep 0
if (image.taustart_ts == taustart_tss[0]):
dbgen.insert_extracted_sources(image.id, [src0], 'blind')
elif (image.taustart_ts == taustart_tss[1]):
dbgen.insert_extracted_sources(image.id, [src1], 'blind')
else:
pass
for image in images:
dbass.associate_extracted_sources(image.id, deRuiter_r=5.68,
new_source_sigma_margin=3)
query = """\
SELECT id
,datapoints
FROM runningcatalog r
WHERE dataset = %(dataset_id)s
ORDER BY datapoints
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# We should have one runningcatalog source, with two datapoints
# for the images in which the sources were seen.
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], 2)
query = """\
SELECT r.id
,rf.band
,rf.f_datapoints
FROM runningcatalog r
,runningcatalog_flux rf
WHERE r.dataset = %(dataset_id)s
AND rf.runcat = r.id
ORDER BY r.id
,rf.band
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# We should have one runningcatalog_flux entry,
# where the source has two flux datapoints
self.assertEqual(len(result), 1)
self.assertEqual(result[0][2], 2)
def test_m2m_nullDetection(self):
"""
This tests that two sources (close enough to be associated had they
been detected at different timesteps) which are not seen in the next
images, and thus get forced fits, will have separate light curves.
The positions are from the previous test.
"""
data = {'description': "null detection:" + self._testMethodName}
dataset = DataSet(data=data)
# Three timesteps, just 1 band -> 3 images.
taustart_tss = [datetime.datetime(2013, 8, 1),
datetime.datetime(2013, 9, 1),
datetime.datetime(2013, 10, 1)]
freq_effs = [124]
freq_effs = [f * 1e6 for f in freq_effs]
im_params = db_subs.generate_timespaced_dbimages_data(len(freq_effs)
* len(taustart_tss))
timestamps = itertools.repeat(taustart_tss, len(freq_effs))
for im, freq, ts in zip(im_params, itertools.cycle(freq_effs),
itertools.chain.from_iterable(zip(*timestamps))):
im['freq_eff'] = freq
im['taustart_ts'] = ts
images = []
for im in im_params:
image = tkp.db.Image(dataset=dataset, data=im)
images.append(image)
# Arbitrary parameters, except that they fall inside our image
# and close together (see previous test)
src0 = db_subs.example_extractedsource_tuple(ra=122.985, dec=10.5)
src1 = db_subs.example_extractedsource_tuple(ra=123.015, dec=10.5)
# Group images per timestep (blocks of len(freq_effs); here a single band).
for images in zip(*(iter(images),) * len(freq_effs)):
for image in images:
# The sources are only seen at timestep 0
if (image.taustart_ts == taustart_tss[0]):
dbgen.insert_extracted_sources(image.id, [src0,src1],
'blind')
else:
pass
for image in images:
dbass.associate_extracted_sources(image.id, deRuiter_r=5.68,
new_source_sigma_margin=3)
nd_ids_pos = dbnd.get_nulldetections(image.id)
# The null_detections are the positional inputs for the forced
# fits, which in turn return additional parameters,
# e.g. from src0, src1
if image.taustart_ts == taustart_tss[0]:
# There are no null detections at the first timestep
self.assertEqual(len(nd_ids_pos), 0)
elif image.taustart_ts == taustart_tss[1]:
# src0 & src1 are null detections at the second timestep
self.assertEqual(len(nd_ids_pos), 2)
dbgen.insert_extracted_sources(image.id, [src0,src1],
'ff_nd',
ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
else:
# All other images have two null detections.
self.assertEqual(len(nd_ids_pos), 2)
dbgen.insert_extracted_sources(image.id, [src0, src1],
'ff_nd',
ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
# And here we have to associate the null detections with the
# runcat sources...
dbnd.associate_nd(image.id)
query = """\
SELECT id
,datapoints
FROM runningcatalog r
WHERE dataset = %(dataset_id)s
ORDER BY datapoints
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# We should have two runningcatalog sources, with a datapoint for
# every image in which the sources were seen.
self.assertEqual(len(result), 2)
query = """\
SELECT r.id
,rf.band
,rf.f_datapoints
FROM runningcatalog r
,runningcatalog_flux rf
WHERE r.dataset = %(dataset_id)s
AND rf.runcat = r.id
ORDER BY r.id
,rf.band
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# We should have two runningcatalog_flux entries,
# one for every source in the band, i.e. 2 x 1.
self.assertEqual(len(result), 2)
# Source 0: inserted into timestep 0.
# Force-fits in images at next timesteps,
# so 1+2 for band 0.
self.assertEqual(result[0][2], 3)
# Source 1: inserted into timestep 0
# Force-fits in images at next timesteps.
# so 1+2 for band 0
self.assertEqual(result[1][2], 3)
#self.assertEqual(result[2][2], 2)
#self.assertEqual(result[3][2], 2)
# We should also have two lightcurves for both sources,
# where source 1 has 3 datapoints in band0 (t1,t2,t3).
# Source 2 also has 3 datapoints for band0 (t1,t2,t3).
query = """\
SELECT a.runcat
,a.xtrsrc
,a.type
,i.band
,i.taustart_ts
FROM assocxtrsource a
,extractedsource x
,image i
WHERE a.xtrsrc = x.id
AND x.image = i.id
AND i.dataset = %(dataset_id)s
ORDER BY a.runcat
,i.band
,i.taustart_ts
"""
cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
result = cursor.fetchall()
# 3 + 3 entries for source 0 and 1 resp.
self.assertEqual(len(result), 6)
# The individual light-curve datapoints
# Source1: new at t1, band0
self.assertEqual(result[0][2], 4)
self.assertEqual(result[0][4], taustart_tss[0])
# Source1: Forced fit at t2, same band
self.assertEqual(result[1][2], 7)
self.assertEqual(result[1][3], result[0][3])
self.assertEqual(result[1][4], taustart_tss[1])
# Source1: Forced fit at t3, same band
self.assertEqual(result[2][2], 7)
self.assertEqual(result[2][3], result[1][3])
self.assertEqual(result[2][4], taustart_tss[2])
# Source2: new at t1, band0
self.assertEqual(result[3][2], 4)
self.assertEqual(result[3][3], result[1][3])
self.assertEqual(result[3][4], taustart_tss[0])
# Source2: Forced fit at t2, band0
self.assertEqual(result[4][2], 7)
self.assertEqual(result[4][3], result[3][3])
self.assertEqual(result[4][4], taustart_tss[1])
# Source2: Forced fit at t3, band0
self.assertEqual(result[5][2], 7)
self.assertEqual(result[5][3], result[4][3])
self.assertEqual(result[5][4], taustart_tss[2])
|
|
# coding=utf-8
# This is a modified example originally from The Google AI Language Team
# Authors and The HuggingFace Inc. team.
# Modified by Richard Liaw.
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (
Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import logging
import json
import os
import time
from filelock import FileLock
from dataclasses import dataclass, field
from typing import Optional
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from tqdm import trange
import torch.distributed as dist
from transformers import (MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, AdamW, AutoConfig,
AutoModelForSequenceClassification, AutoTokenizer,
get_linear_schedule_with_warmup, HfArgumentParser,
TrainingArguments)
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
import ray
from ray.util.sgd.torch import TrainingOperator
from ray.util.sgd import TorchTrainer
from ray.util.sgd.torch.examples.transformers.utils import (
evaluate, load_and_cache_examples, save_and_evaluate_checkpoints)
try:
from apex import amp
except ImportError:
amp = None
MODEL_CONFIG_CLASSES = list(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum(
(tuple(key for key in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP.keys()
if key.startswith(conf.model_type))
for conf in MODEL_CONFIG_CLASSES),
(),
)
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def announce_training(args, dataset_len, t_total):
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", dataset_len)
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per device = %d",
args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accum) = %d",
args.per_device_train_batch_size * args.gradient_accumulation_steps *
args.num_workers,
)
logger.info(" Gradient Accumulation steps = %d",
args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
class TransformerOperator(TrainingOperator):
def setup(self, config):
self.args = args = config["args"]
start = time.time()
self.tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name
if args.tokenizer_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
logger.info(f"tokenizer instantiation time: {time.time() - start}")
# Load data.
train_dataset = load_and_cache_examples(
args, args.task_name, self.tokenizer, evaluate=False)
train_sampler = RandomSampler(
train_dataset) if not dist.is_initialized() else None
train_loader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.per_device_train_batch_size)
# Create model.
with FileLock(os.path.expanduser("~/.download.lock")):
processor = processors[args.task_name]()
label_list = processor.get_labels()
num_labels = len(label_list)
model_config = AutoConfig.from_pretrained(
args.config_name
if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=model_config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# Create optimizer.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adam_epsilon)
# Register components.
self.model, self.optimizer = self.register(
models=model,
optimizers=optimizer,
train_loader=train_loader,
validation_loader=None)
self.train_data_len = len(self.train_loader)
self._warmup_scheduler = get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=self.calculate_t_total())
self._global_step = 0
announce_training(args, self.train_data_len, self.calculate_t_total())
def train_batch(self, batch, batch_info=None):
args = self.args
model = self.model
optimizer = self.optimizer
step = batch_info["batch_idx"]
model.train()
batch = tuple(t.to(self.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3]
}
if args.model_type != "distilbert":
# XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
inputs["token_type_ids"] = (batch[2] if args.model_type in [
"bert", "xlnet", "albert"
] else None)
outputs = model(**inputs)
# model outputs are always tuple in transformers (see doc)
loss = outputs[0]
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
batch_loss = loss.item()
# Make sure the final batch of an epoch still triggers an optimizer step
# when the epoch has fewer batches than gradient_accumulation_steps.
ending = (self.train_data_len <= args.gradient_accumulation_steps
and (step + 1) == self.train_data_len)
if (step + 1) % args.gradient_accumulation_steps == 0 or ending:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(),
args.max_grad_norm)
self.optimizer.step()
self._warmup_scheduler.step() # Update learning rate schedule
model.zero_grad()
self._global_step += 1
learning_rate_scalar = self._warmup_scheduler.get_lr()[0]
return {"learning_rate": learning_rate_scalar, "loss": batch_loss}
def calculate_t_total(self):
args = self.args
grad_accum_steps = args.gradient_accumulation_steps
train_data_len = len(self.train_loader)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (
train_data_len // grad_accum_steps) + 1
else:
t_total = (
train_data_len // grad_accum_steps * args.num_train_epochs)
return t_total
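# Worked example with hypothetical numbers: with 1,000 batches per epoch,
# gradient_accumulation_steps=2 and num_train_epochs=3, this yields
# t_total = 1000 // 2 * 3 = 1500 optimizer steps; if max_steps=500 were set,
# the schedule would instead be capped at 500 steps and num_train_epochs
# recomputed accordingly.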
@dataclass
class ModelArguments:
"""Arguments pertaining to model/config/tokenizer."""
model_name_or_path: str = field(
metadata=dict(help="Path to pre-trained model or shortcut name "
"selected in the list: " + ", ".join(ALL_MODELS)))
model_type: str = field(
metadata=dict(help="Model type selected "
"in the list: " + ", ".join(MODEL_TYPES)))
config_name: Optional[str] = field(
default=None,
metadata=dict(
help="Pretrained config name or path if not the same as model_name"
))
tokenizer_name: Optional[str] = field(
default=None,
metadata=dict(help="Pretrained tokenizer name or path "
"if not the same as model_name"))
cache_dir: Optional[str] = field(
default=None,
metadata=dict(help="Where do you want to store the pre-trained "
"models downloaded from s3"))
@dataclass
class DataProcessingArguments:
task_name: str = field(
metadata=dict(help="The name of the task to train selected "
"in the list: " + ", ".join(processors.keys())))
data_dir: str = field(
metadata=dict(help="The input data dir. Should contain "
"the .tsv files (or other data files) for the task."))
max_seq_length: int = field(
default=128,
metadata=dict(help="The maximum total input sequence length "
"after tokenization. Sequences longer "
"than this will be truncated, sequences "
"shorter will be padded."))
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class RayArguments:
num_workers: int = field(
default=1,
metadata={"help": "Number of data-parallel workers to use."})
address: str = field(
default=None,
metadata={"help": "Address of the Ray cluster to connect to."})
def main():
parser = HfArgumentParser((ModelArguments, DataProcessingArguments,
TrainingArguments, RayArguments))
all_args = parser.parse_args_into_dataclasses()
model_args, dataprocessing_args, training_args, ray_args = all_args
# For now, let's merge all the sets of args into one,
# but soon, we'll keep distinct sets of args, with a
# cleaner separation of concerns.
args = argparse.Namespace(**vars(model_args), **vars(dataprocessing_args),
**vars(training_args), **vars(ray_args))
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)
and args.do_train and not args.overwrite_output_dir):
raise ValueError(
"Output directory ({}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome.".format(args.output_dir))
use_gpu = torch.cuda.is_available() and not args.no_cuda
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError(f"Task not found: {args.task_name}")
args.output_mode = output_modes[args.task_name]
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO)
logger.info("Training/evaluation parameters %s", args)
ray.init(address=args.address)
# Training
trainer = TorchTrainer(
training_operator_cls=TransformerOperator,
use_fp16=args.fp16,
apex_args={"opt_level": args.fp16_opt_level},
num_workers=args.num_workers,
use_gpu=use_gpu,
use_tqdm=True,
config={"args": args})
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = trainer.get_local_operator().tokenizer
local_model = trainer.get_model()
epochs_trained = 0
train_iterator = trange(
epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
)
trainer.apply_all_workers(lambda: set_seed(args))
if args.do_train:
for _ in train_iterator:
stats = trainer.train()
print("Training stats:", stats)
logs = evaluate(args, local_model, tokenizer)
print(json.dumps(logs))
# Post-training validation
save_and_evaluate_checkpoints(args, local_model, tokenizer)
if __name__ == "__main__":
main()
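# Illustrative invocation (the script name, paths, model, and task below are
# placeholders; the flags map onto the dataclasses above plus the standard
# transformers TrainingArguments):
#
#   python glue_transformer_example.py \
#       --model_name_or_path bert-base-uncased --model_type bert \
#       --task_name mrpc --data_dir /path/to/MRPC \
#       --output_dir /tmp/mrpc_output --do_train \
#       --per_device_train_batch_size 32 --num_train_epochs 3 \
#       --num_workers 2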
|
|
# -*- coding: UTF-8 -*-
from django import forms
from django import http
from django.conf import settings
from django.conf.urls import url, patterns
from django.contrib import admin
from django.core import urlresolvers
from assopy import admin as aadmin
from assopy import models as amodels
from conference import admin as cadmin
from conference import models as cmodels
from conference import dataaccess as cdata
from conference import settings as csettings
from p3 import models
from p3 import dataaccess
from p3 import forms as pforms
# Add support for translations for some form or admin fields
from django.utils.translation import ugettext_lazy as _
_TICKET_CONFERENCE_COPY_FIELDS = ('shirt_size', 'python_experience', 'diet', 'tagline', 'days', 'badge_image')
def ticketConferenceForm():
class _(forms.ModelForm):
class Meta:
model = models.TicketConference
fields = _().fields
class TicketConferenceForm(forms.ModelForm):
shirt_size = fields['shirt_size']
python_experience = fields['python_experience']
diet = fields['diet']
tagline = fields['tagline']
days = fields['days']
badge_image = fields['badge_image']
class Meta:
model = cmodels.Ticket
def __init__(self, *args, **kw):
if 'instance' in kw:
o = kw['instance']
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
p3c = None
if p3c:
initial = kw.pop('initial', {})
for k in _TICKET_CONFERENCE_COPY_FIELDS:
initial[k] = getattr(p3c, k)
kw['initial'] = initial
return super(TicketConferenceForm, self).__init__(*args, **kw)
return TicketConferenceForm
class TicketConferenceAdmin(cadmin.TicketAdmin):
list_display = cadmin.TicketAdmin.list_display + ('_order', '_assigned', '_tagline',)
list_filter = cadmin.TicketAdmin.list_filter + ('orderitem__order___complete',)
form = ticketConferenceForm()
class Media:
js = ('p5/j/jquery-flot/jquery.flot.js',)
def _order(self, o):
return o.orderitem.order.code
def _assigned(self, o):
if o.p3_conference:
return o.p3_conference.assigned_to
else:
return ''
def _tagline(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
html = p3c.tagline
if p3c.badge_image:
i = ['<img src="%s" width="24" />' % p3c.badge_image.url] * p3c.python_experience
html += '<br />' + ' '.join(i)
return html
_tagline.allow_tags = True
def save_model(self, request, obj, form, change):
obj.save()
try:
p3c = obj.p3_conference
except models.TicketConference.DoesNotExist:
p3c = None
if p3c is None:
p3c = models.TicketConference(ticket=obj)
data = form.cleaned_data
for k in _TICKET_CONFERENCE_COPY_FIELDS:
setattr(p3c, k, data.get(k))
p3c.save()
def changelist_view(self, request, extra_context=None):
if not request.GET:
q = request.GET.copy()
q['fare__conference'] = settings.CONFERENCE_CONFERENCE
q['fare__ticket_type__exact'] = 'conference'
q['orderitem__order___complete__exact'] = 1
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
return super(TicketConferenceAdmin,self).changelist_view(request, extra_context=extra_context)
def queryset(self, request):
qs = super(TicketConferenceAdmin, self).queryset(request)
qs = qs.select_related('orderitem__order', 'p3_conference', 'user', 'fare', )
return qs
def get_urls(self):
urls = super(TicketConferenceAdmin, self).get_urls()
my_urls = patterns('',
url(r'^stats/data/$', self.admin_site.admin_view(self.stats_data), name='p3-ticket-stats-data'),
)
return my_urls + urls
def stats_data(self, request):
from conference.views import json_dumps
from django.db.models import Q
from collections import defaultdict
from microblog.models import PostContent
import datetime
import random
conferences = cmodels.Conference.objects \
.exclude(code__startswith='ep') \
.order_by('conference_start')
output = {}
for c in conferences:
tickets = cmodels.Ticket.objects \
.filter(fare__conference=c) \
.filter(Q(orderitem__order___complete=True) | Q(orderitem__order__method__in=('bank', 'admin'))) \
.select_related('fare', 'orderitem__order')
data = {
'conference': defaultdict(lambda: 0),
'partner': defaultdict(lambda: 0),
'event': defaultdict(lambda: 0),
'other': defaultdict(lambda: 0),
}
for t in tickets:
tt = t.fare.ticket_type
date = t.orderitem.order.created.date()
offset = date - c.conference_start
data[tt][offset.days] += 1
for k, v in data.items():
data[k] = sorted(v.items())
dlimit = datetime.date(c.conference_start.year, 1, 1)
deadlines = cmodels.DeadlineContent.objects \
.filter(language='en') \
.filter(deadline__date__lte=c.conference_start, deadline__date__gte=dlimit) \
.select_related('deadline') \
.order_by('deadline__date')
markers = [((d.deadline.date - c.conference_start).days, 'CAL: ' + (d.headline or d.body)) for d in
deadlines]
posts = PostContent.objects \
.filter(language='en') \
.filter(post__date__lte=c.conference_start, post__date__gte=dlimit) \
.filter(post__status='P') \
.select_related('post') \
.order_by('post__date')
markers += [((d.post.date.date() - c.conference_start).days, 'BLOG: ' + d.headline) for d in posts]
output[c.code] = {
'data': data,
'markers': markers,
}
plot_data = output
def accumulate_tickets(list_of_lists):
x = 0
b = []
for el in list_of_lists:
b.append([el[0], el[1] + x])
x += el[1]
return b
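# Hypothetical example: accumulate_tickets([[0, 2], [1, 3], [2, 1]])
# returns [[0, 2], [1, 5], [2, 6]], i.e. the running total of tickets
# per day offset, which is what the area series below plot.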
series = []
for k, v in plot_data.iteritems():
for k1, v1 in plot_data[k]['data'].iteritems():
series.append({'name': k + '-' + k1, 'type': 'area', 'data': accumulate_tickets(v1)})
markers = []
for k, v in plot_data.iteritems():
markers.append({'labelOptions': {
'shape': 'connector',
'align': 'right',
'justify': 'false',
'crop': 'true',
'style': {
'fontSize': '0.8em',
'textOutline': '1px white'
}},
'labels': [
{'point': {'xAxis': 0, 'yAxis': 0, 'x': el[0], 'y': random.randint(0, 600)},
'text': k + '-' + el[1]} for el in plot_data[k]['markers']
]})
lines = []
for k, v in plot_data.items():
for el in plot_data[k]['markers']:
lines.append({'color': '#e6e6e6', 'value': el[0], 'width': 1})
output = {'series': series, 'lines': lines, 'markers': markers}
return http.HttpResponse(json_dumps(output), 'text/javascript')
admin.site.unregister(cmodels.Ticket)
admin.site.register(cmodels.Ticket, TicketConferenceAdmin)
class SpeakerAdmin(cadmin.SpeakerAdmin):
def queryset(self, request):
# XXX: waiting to upgrade to django 1.4, I'm implementing
# this bad hack filter to keep only speakers of current conference.
qs = super(SpeakerAdmin, self).queryset(request)
qs = qs.filter(user__in=(
cmodels.TalkSpeaker.objects \
.filter(talk__conference=settings.CONFERENCE_CONFERENCE) \
.values('speaker')
))
return qs
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
sids = queryset.values_list('user', flat=True)
profiles = dataaccess.profiles_data(sids)
self._profiles = dict(zip(sids, profiles))
return super(SpeakerAdmin, self).get_paginator(request, queryset, per_page, orphans, allow_empty_first_page)
def _avatar(self, o):
return '<img src="%s" height="32" />' % (self._profiles[o.user_id]['image'],)
_avatar.allow_tags = True
admin.site.unregister(cmodels.Speaker)
admin.site.register(cmodels.Speaker, SpeakerAdmin)
from conference import forms as cforms
class TalkConferenceAdminForm(cadmin.TalkAdminForm):
def __init__(self, *args, **kwargs):
super(TalkConferenceAdminForm, self).__init__(*args, **kwargs)
self.fields['tags'].required = False
class TalkConferenceAdmin(cadmin.TalkAdmin):
multilingual_widget = cforms.MarkEditWidget
form = TalkConferenceAdminForm
admin.site.unregister(cmodels.Talk)
admin.site.register(cmodels.Talk, TalkConferenceAdmin)
class DonationAdmin(admin.ModelAdmin):
list_display = ('_name', 'date', 'amount')
list_select_related = True
search_fields = ('user__user__first_name', 'user__user__last_name', 'user__user__email')
date_hierarchy = 'date'
def _name(self, o):
return o.user.name()
_name.short_description = 'name'
_name.admin_order_field = 'user__user__first_name'
admin.site.register(models.Donation, DonationAdmin)
class HotelBookingAdmin(admin.ModelAdmin):
list_display = ('conference', 'booking_start', 'booking_end', 'minimum_night')
admin.site.register(models.HotelBooking, HotelBookingAdmin)
class HotelRoomAdmin(admin.ModelAdmin):
list_display = ('_conference', 'room_type', 'quantity', 'amount',)
list_editable = ('quantity', 'amount',)
list_filter = ('booking__conference',)
list_select_related = True
def _conference(self, o):
return o.booking.conference_id
def get_urls(self):
urls = super(HotelRoomAdmin, self).get_urls()
my_urls = patterns('',
url(r'^tickets/$', self.admin_site.admin_view(self.ticket_list), name='p3-hotelrooms-tickets-data'),
)
return my_urls + urls
def ticket_list(self, request):
from conference.views import json_dumps
day_ix = int(request.GET['day'])
room_type = request.GET['type']
rdays = models.TicketRoom.objects.reserved_days()
day = rdays[day_ix]
qs = models.TicketRoom.objects.valid_tickets() \
.filter(room_type__room_type=room_type, checkin__lte=day, checkout__gte=day) \
.select_related('ticket__user', 'ticket__orderitem__order') \
.order_by('ticket__orderitem__order__created')
output = []
for row in qs:
user = row.ticket.user
order = row.ticket.orderitem.order
name = u'{0} {1}'.format(user.first_name, user.last_name)
if row.ticket.name and row.ticket.name != name:
name = u'{0} ({1})'.format(row.ticket.name, name)
output.append({
'user': {
'id': user.id,
'name': name,
},
'order': {
'id': order.id,
'code': order.code,
'method': order.method,
'complete': order._complete,
},
'period': (row.checkin, row.checkout, row.checkout == day),
})
return http.HttpResponse(json_dumps(output), 'text/javascript')
admin.site.register(models.HotelRoom, HotelRoomAdmin)
class TicketRoomAdmin(admin.ModelAdmin):
list_display = ('_user', '_room_type', 'ticket_type', 'checkin', 'checkout', '_order_code', '_order_date', '_order_confirmed')
list_select_related = True
search_fields = ('ticket__user__first_name', 'ticket__user__last_name', 'ticket__user__email', 'ticket__orderitem__order__code')
raw_id_fields = ('ticket', )
list_filter = ('room_type__room_type',)
def _user(self, o):
return o.ticket.user
def _room_type(self, o):
return o.room_type.get_room_type_display()
def _order_code(self, o):
return o.ticket.orderitem.order.code
def _order_date(self, o):
return o.ticket.orderitem.order.created
def _order_confirmed(self, o):
return o.ticket.orderitem.order._complete
_order_confirmed.boolean = True
admin.site.register(models.TicketRoom, TicketRoomAdmin)
# Admin Manager for P3Talk Class
class P3TalkAdminForm(forms.ModelForm):
class Meta:
model = models.P3Talk
fields = ['sub_community']
# talk_url = forms.URLField(_('Talk'), required=False)
talk_url = forms.ModelChoiceField(required=False, queryset=cmodels.Talk.objects.filter(
conference=settings.CONFERENCE_CONFERENCE))
def __init__(self, *args, **kwargs):
super(P3TalkAdminForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
self.fields['talk_url'].widget = pforms.HTMLAnchorWidget(title=self.instance.talk.title)
self.fields['talk_url'].initial = str(urlresolvers.reverse('admin:conference_talk_change',
args=(self.instance.talk.pk,)))
class P3TalkAdmin(admin.ModelAdmin):
list_display = ('_title', '_conference', '_duration',
'sub_community', '_speakers',
'_status', '_slides', '_video')
list_filter = ('talk__conference', 'talk__status', 'sub_community')
ordering = ('-talk__conference', 'talk__title')
search_fields = ('talk__title',)
form = P3TalkAdminForm
def _title(self, obj):
return obj.talk.title
_title.short_description = 'Talk Title'
_title.admin_order_field = 'talk__title'
def _conference(self, obj):
return obj.talk.conference
_conference.short_description = 'Conference'
_conference.admin_order_field = 'talk__conference'
def _duration(self, obj):
return obj.talk.duration
_duration.short_description = 'Duration'
_duration.admin_order_field = 'talk__duration'
def _status(self, obj):
return obj.talk.status
_status.short_description = 'Status'
_status.admin_order_field = 'talk__conference'
def _slides(self, obj):
return bool(obj.talk.slides)
_slides.boolean = True
def _video(self, obj):
return bool(obj.talk.video_type) and \
(bool(obj.talk.video_url) or
bool(obj.talk.video_file))
_video.boolean = True
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
# Cloned
# from conference.admin.TalkAdmin
talks = cdata.talks_data(queryset.values_list('talk__id', flat=True))
self.cached_talks = dict([(x['id'], x) for x in talks])
sids = [s['id'] for t in talks for s in t['speakers']]
profiles = cdata.profiles_data(sids)
self.cached_profiles = dict([(x['id'], x) for x in profiles])
return super(P3TalkAdmin, self).get_paginator(request, queryset, per_page,
orphans, allow_empty_first_page)
def changelist_view(self, request, extra_context=None):
"""
Cloned (and adapted) from conference.admin.TalkAdmin
"""
if 'conference' not in request.GET and 'conference__exact' not in request.GET:
q = request.GET.copy()
q['talk__conference'] = csettings.CONFERENCE
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
return super(P3TalkAdmin, self).changelist_view(request, extra_context=extra_context)
def _speakers(self, obj):
# Slightly adapted from conference.admin.TalkAdmin
# (basically the same method!)
# We may consider to remove this method from this model Admin
# or some Refactoring is needed to remove this useless duplication
data = self.cached_talks.get(obj.talk.id)
output = []
for x in data['speakers']:
args = {
'url': urlresolvers.reverse('admin:conference_speaker_change', args=(x['id'],)),
'name': x['name'],
'mail': self.cached_profiles[x['id']]['email'],
}
output.append(
'<a href="%(url)s">%(name)s</a> (<a href="mailto:%(mail)s">mail</a>)' % args)
return '<br />'.join(output)
_speakers.allow_tags = True
admin.site.register(models.P3Talk, P3TalkAdmin)
class InvoiceAdmin(aadmin.InvoiceAdmin):
"""
Specialization to handle downloading the invoices generated with genro
"""
def _invoice(self, i):
if i.assopy_id:
fake = not i.payment_date
view = urlresolvers.reverse('genro-legacy-invoice', kwargs={'assopy_id': i.assopy_id})
return '<a href="%s">View</a> %s' % (view, '[Not paid]' if fake else '')
else:
return super(InvoiceAdmin, self)._invoice(i)
_invoice.allow_tags = True
_invoice.short_description = 'Download'
admin.site.unregister(amodels.Invoice)
admin.site.register(amodels.Invoice, InvoiceAdmin)
|
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import responses
from admitad.items import StatisticWebsites, StatisticCampaigns,\
StatisticDays, StatisticMonths, StatisticActions, StatisticSubIds,\
StatisticSources, StatisticKeywords
from admitad.constants import DEFAULT_PAGINATION_LIMIT, DEFAULT_PAGINATION_OFFSET
from admitad.tests.base import BaseTestCase
class StatisticWebsitesTestCase(BaseTestCase):
def test_get_statistic_websites_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticWebsites.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 20,
'subid': '1234567890987654321',
'total': 200,
'order_by': ['cr'],
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticWebsites.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=20,
subid='1234567890987654321',
total=200,
order_by=['cr']
)
self.assertIn('status', result)
class StatisticCampaignTestCase(BaseTestCase):
def test_get_statistic_campaign_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticCampaigns.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 20,
'subid': '1234567890987654321',
'total': 200,
'order_by': ['cr'],
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticCampaigns.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=20,
subid='1234567890987654321',
total=200,
order_by=['cr']
)
self.assertIn('status', result)
class StatisticDaysTestCase(BaseTestCase):
def test_get_statistic_days_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticDays.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 20,
'subid': '1234567890987654321',
'total': 200,
'order_by': ['cr'],
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticDays.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=20,
subid='1234567890987654321',
total=200,
order_by=['cr']
)
self.assertIn('status', result)
class StatisticMonthsTestCase(BaseTestCase):
def test_get_statistic_months_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticMonths.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 20,
'subid': '1234567890987654321',
'total': 200,
'order_by': ['cr'],
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticMonths.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=20,
subid='1234567890987654321',
total=200,
order_by=['cr']
)
self.assertIn('status', result)
class StatisticActionsTestCase(BaseTestCase):
def test_get_statistic_actions_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticActions.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'closing_date_start': '01.01.2010',
'closing_date_end': '01.02.2010',
'status_updated_start': '01.01.2010 10:10:10',
'status_updated_end': '01.02.2010 10:10:10',
'website': 10,
'campaign': 20,
'subid': '1234567890987654321',
'subid1': '1234567890987654321',
'subid4': '1234567890987654321',
'status': 1,
'keyword': 'foo',
'action': 'lead',
'action_type': 'lead',
'action_id': 27,
'order_by': ['status'],
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticActions.get(
date_start='01.01.2010',
date_end='01.02.2010',
closing_date_start='01.01.2010',
closing_date_end='01.02.2010',
status_updated_start='01.01.2010 10:10:10',
status_updated_end='01.02.2010 10:10:10',
website=10,
campaign=20,
subid='1234567890987654321',
subid1='1234567890987654321',
subid4='1234567890987654321',
status=1,
keyword='foo',
action='lead',
action_type='lead',
action_id=27,
order_by=['status']
)
self.assertIn('status', result)
class StatisticSubIdsTestCase(BaseTestCase):
def test_get_statistic_sub_ids_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticSubIds.URL, subid_number='', params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 20,
'subid1': '123567',
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticSubIds.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=20,
subid1='123567'
)
self.assertIn('status', result)
class StatisticSourcesTestCase(BaseTestCase):
def test_get_statistic_sources_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticSources.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 22,
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticSources.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=22
)
self.assertIn('status', result)
class StatisticKeywordsTestCase(BaseTestCase):
def test_get_statistic_keywords_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(StatisticKeywords.URL, params={
'date_start': '01.01.2010',
'date_end': '01.02.2010',
'website': 10,
'campaign': 20,
'source': 'g',
'order_by': ['cr', 'ecpc'],
'limit': DEFAULT_PAGINATION_LIMIT,
'offset': DEFAULT_PAGINATION_OFFSET
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.StatisticKeywords.get(
date_start='01.01.2010',
date_end='01.02.2010',
website=10,
campaign=20,
source='g',
order_by=['cr', 'ecpc']
)
self.assertIn('status', result)
if __name__ == '__main__':
unittest.main()
|
|
import re
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.questioner import MigrationQuestioner
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes()
changes = self._arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def _detect_changes(self):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
"""
# We'll store migrations as lists by app names for now
self.migrations = {}
old_app_cache = self.from_state.render()
new_app_cache = self.to_state.render()
# Prepare lists of old/new model keys that we care about
# (i.e. ignoring proxy ones)
old_model_keys = [
(al, mn)
for al, mn in self.from_state.models.keys()
if not old_app_cache.get_model(al, mn)._meta.proxy
]
new_model_keys = [
(al, mn)
for al, mn in self.to_state.models.keys()
if not new_app_cache.get_model(al, mn)._meta.proxy
]
# Adding models. Phase 1 is adding models with no outward relationships.
added_models = set(new_model_keys) - set(old_model_keys)
pending_add = {}
for app_label, model_name in added_models:
model_state = self.to_state.models[app_label, model_name]
# Are there any relationships out from this model? If so, punt it to the next phase.
related_fields = []
for field in new_app_cache.get_model(app_label, model_name)._meta.fields:
if field.rel:
if field.rel.to:
related_fields.append((field.name, field.rel.to._meta.app_label.lower(), field.rel.to._meta.object_name.lower()))
if hasattr(field.rel, "through") and not field.rel.through._meta.auto_created:
related_fields.append((field.name, field.rel.through._meta.app_label.lower(), field.rel.through._meta.object_name.lower()))
if related_fields:
pending_add[app_label, model_name] = related_fields
else:
self.add_to_migration(
app_label,
operations.CreateModel(
name=model_state.name,
fields=model_state.fields,
options=model_state.options,
bases=model_state.bases,
)
)
# Phase 2 is progressively adding pending models, splitting up into two
# migrations if required.
pending_new_fks = []
while pending_add:
# Is there one we can add that has all dependencies satisfied?
satisfied = [(m, rf) for m, rf in pending_add.items() if all((al, mn) not in pending_add for f, al, mn in rf)]
if satisfied:
(app_label, model_name), related_fields = sorted(satisfied)[0]
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.CreateModel(
name=model_state.name,
fields=model_state.fields,
options=model_state.options,
bases=model_state.bases,
)
)
for field_name, other_app_label, other_model_name in related_fields:
if app_label != other_app_label:
self.add_dependency(app_label, other_app_label)
del pending_add[app_label, model_name]
# Ah well, we'll need to split one. Pick deterministically.
else:
(app_label, model_name), related_fields = sorted(pending_add.items())[0]
model_state = self.to_state.models[app_label, model_name]
# Work out the fields that need splitting out
bad_fields = dict((f, (al, mn)) for f, al, mn in related_fields if (al, mn) in pending_add)
# Create the model, without those
self.add_to_migration(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[(n, f) for n, f in model_state.fields if n not in bad_fields],
options=model_state.options,
bases=model_state.bases,
)
)
# Add the bad fields to be made in a phase 3
for field_name, (other_app_label, other_model_name) in bad_fields.items():
pending_new_fks.append((app_label, model_name, field_name, other_app_label))
del pending_add[app_label, model_name]
# Phase 3 is adding the final set of FKs as separate new migrations
for app_label, model_name, field_name, other_app_label in pending_new_fks:
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=model_state.get_field_by_name(field_name),
),
new=True,
)
if app_label != other_app_label:
self.add_dependency(app_label, other_app_label)
# Removing models
removed_models = set(old_model_keys) - set(new_model_keys)
for app_label, model_name in removed_models:
model_state = self.from_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.DeleteModel(
model_state.name,
)
)
# Changes within models
kept_models = set(old_model_keys).intersection(new_model_keys)
for app_label, model_name in kept_models:
old_model_state = self.from_state.models[app_label, model_name]
new_model_state = self.to_state.models[app_label, model_name]
# New fields
old_field_names = set(x for x, y in old_model_state.fields)
new_field_names = set(x for x, y in new_model_state.fields)
for field_name in new_field_names - old_field_names:
field = new_model_state.get_field_by_name(field_name)
# Scan to see if this is actually a rename!
field_dec = field.deconstruct()[1:]
found_rename = False
for removed_field_name in (old_field_names - new_field_names):
if old_model_state.get_field_by_name(removed_field_name).deconstruct()[1:] == field_dec:
if self.questioner.ask_rename(model_name, removed_field_name, field_name, field):
self.add_to_migration(
app_label,
operations.RenameField(
model_name=model_name,
old_name=removed_field_name,
new_name=field_name,
)
)
old_field_names.remove(removed_field_name)
new_field_names.remove(field_name)
found_rename = True
break
if found_rename:
continue
# You can't just add NOT NULL fields with no default
if not field.null and not field.has_default():
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_to_migration(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=False,
)
)
else:
self.add_to_migration(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
)
)
# Old fields
for field_name in old_field_names - new_field_names:
self.add_to_migration(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
)
)
# The same fields
for field_name in old_field_names.intersection(new_field_names):
# Did the field change?
old_field_dec = old_model_state.get_field_by_name(field_name).deconstruct()
new_field_dec = new_model_state.get_field_by_name(field_name).deconstruct()
if old_field_dec != new_field_dec:
self.add_to_migration(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=new_model_state.get_field_by_name(field_name),
)
)
# unique_together changes
if old_model_state.options.get("unique_together", set()) != new_model_state.options.get("unique_together", set()):
self.add_to_migration(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=new_model_state.options.get("unique_together", set()),
)
)
# Alright, now add internal dependencies
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# Clean up dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
return self.migrations
def add_to_migration(self, app_label, operation, new=False):
migrations = self.migrations.setdefault(app_label, [])
if not migrations or new:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(migrations) + 1), app_label)
migrations.append(instance)
migrations[-1].operations.append(operation)
def add_dependency(self, app_label, other_app_label):
"""
Adds a dependency to app_label's newest migration on
other_app_label's latest migration.
"""
if self.migrations.get(other_app_label, []):
dependency = (other_app_label, self.migrations[other_app_label][-1].name)
else:
dependency = (other_app_label, "__first__")
self.migrations[app_label][-1].dependencies.append(dependency)
def _arrange_for_graph(self, changes, graph):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_initial"
else:
new_name = "%04i_%s" % (next_number, self.suggest_name(migration.operations))
name_map[(app_label, migration.name)] = (app_label, new_name)
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique; they
must be prefixed by a number or date.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto"
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number is found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
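# --- Illustrative usage sketch (added; not part of the original module) ---
# Assuming two ProjectState instances and a loaded MigrationGraph (names here
# are hypothetical), the autodetector is typically driven like this:
#
#     autodetector = MigrationAutodetector(from_state, to_state)
#     changes = autodetector.changes(graph, trim_to_apps={"myapp"})
#     for app_label, migrations in changes.items():
#         for migration in migrations:
#             print(app_label, migration.name, migration.operations)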
|
|
"""
A Python script to convert excel files into JSON.
"""
import json
import re
import sys
import codecs
import os
import constants
from errors import PyXFormError
from xls2json_backends import xls_to_dict, csv_to_dict
# the following are the three sheet names that this program expects
SURVEY_SHEET = u"survey"
CHOICES = u"choices"
SETTINGS = u"settings"
CHOICES_SHEET_NAMES = [u"choices", u"choices and columns"]
COLUMNS = u"columns"
COLUMNS_SHEET = u"columns" #Not used
TYPES_SHEET = u"question types"
LIST_NAME = u"list name"
# Special reserved values for type column that allow the user to set
# the form's title or id.
SET_TITLE = u"set form title"
SET_ID = u"set form id"
SET_DEFAULT_LANG = u"set default language"
group_name_conversions = {
"looped group": u"repeat"
}
sheet_name_conversions = {
u"choices" : u"lists",
u"choices and columns" : u"lists",
u"columns" : u"lists"
}
def print_pyobj_to_json(pyobj, path):
fp = codecs.open(path, mode="w", encoding="utf-8")
json.dump(pyobj, fp=fp, ensure_ascii=False, indent=4)
fp.close()
#Actually I shouldn't separate this out because I will also want to apply various transformations to the sheet before and during validation.
def validate_spreadsheet(spreadsheet_dict):
"""
spreadsheet_dict -- a dealiased spreadsheet dictionary
I'm choosing to validate at the spreadsheet stage as opposed to the json or xform stage
because it will enable us to give the clearest feedback to the user (e.g. "row 19 has a duplicate name").
"""
if SURVEY_SHEET not in spreadsheet_dict:
raise PyXFormError("You must have a sheet named: " + SURVEY_SHEET)
#Check for duplicate names
name_set = set()
for sheet_name, dicts in spreadsheet_dict.items():
for dicty in dicts:
if constants.NAME not in dicty:
#TODO: Warn
continue
if dicty[constants.NAME] in name_set:
raise PyXFormError("Duplicate name: " + dicty[constants.NAME])
def group_dictionaries(spreadsheet_dict):
"""
For each row in the worksheet, group all keys that contain a
colon. So {"text:english": "hello", "text:french" :
"bonjour"} becomes {"text": {"english": "hello", "french" :
"bonjour"}.
"""
DICT_CHAR = u":"
for sheet_name, dicts in spreadsheet_dict.items():
for dicty in dicts:
groups = {}
for k, v in dicty.items():
l = k.split(DICT_CHAR)
if len(l) >= 2:
if l[0] not in groups:
groups[l[0]] = {}
groups[l[0]][DICT_CHAR.join(l[1:])] = v
del dicty[k]
for k, v in groups.items():
assert k not in dicty
dicty[k] = v
class SpreadsheetReader(object):
def __init__(self, path_or_file):
if isinstance(path_or_file, basestring):
self._file_object = None
path = path_or_file
else:
self._file_object = path_or_file
path = self._file_object.name
(filepath, filename) = os.path.split(path)
(shortname, extension) = os.path.splitext(filename)
self.filetype = None
if extension == ".xlsx":
raise PyXFormError("XLSX files are not supported at this time. Please save the spreadsheet as an XLS file (97).")
elif extension == ".xls":
self.filetype = "xls"
elif extension == ".csv":
self.filetype = "csv"
self._path = path
self._name = unicode(shortname)
self._print_name = unicode(shortname)
self._title = unicode(shortname)
self._id = unicode(shortname)
self._def_lang = unicode("English")
self._parse_input()
def _parse_input(self):
if self.filetype is None:
raise PyXFormError("File was not recognized")
elif self.filetype == "xls":
self._dict = xls_to_dict(self._file_object if self._file_object is not None else self._path)
elif self.filetype == "csv":
self._dict = csv_to_dict(self._file_object if self._file_object is not None else self._path)
self._sheet_names = self._dict.keys()
self._set_choices_and_columns_sheet_name()
self._strip_unicode_values()
self._fix_int_values()
group_dictionaries(self._dict)
def _set_choices_and_columns_sheet_name(self):
"""
If the xls file has a sheet with a name in CHOICES_SHEET_NAMES
_lists_sheet_name is set to it.
"""
sheet_names = self._dict.keys()
self._lists_sheet_name = None
for sheet_name in sheet_names:
if sheet_name in CHOICES_SHEET_NAMES:
self._lists_sheet_name = sheet_name
def _strip_unicode_values(self):
for sheet_name, dicts in self._dict.items():
for d in dicts:
for k, v in d.items():
if type(v) == unicode:
d[k] = v.strip()
def _fix_int_values(self):
"""
Excel only has floats, but we really want integer values to be
ints.
"""
for sheet_name, dicts in self._dict.items():
for d in dicts:
for k, v in d.items():
if type(v) == float and v == int(v):
d[k] = int(v)
def to_json_dict(self):
return self._dict
#TODO: Make sure the unicode chars don't show up
def print_json_to_file(self, filename=""):
if not filename:
filename = self._path[:-4] + ".json"
print_pyobj_to_json(self.to_json_dict(), filename)
#Aliases
yes_no_conversions = {
"yes": "true()",
"Yes": "true()",
"YES": "true()",
"true": "true()",
"True": "true()",
"TRUE": "true()",
"no": "false()",
"No": "false()",
"NO": "false()",
"false": "false()",
"False": "false()",
"FALSE": "false()"
}
control_conversions = {
u"group" : u"group",
u"lgroup" : u"group",
u"repeat": u"repeat",
u"loop": u"repeat",
u"looped group": u"repeat"
}
def dealias_headers(dict_array):
"""
Copies dict_array so this isn't super efficient.
"""
#TODO: Check on bind prefix.
column_header_conversions = {
u"constraint_message" : u"constraint message",
u"read_only" : u"read only",
u"select_one" : u"select one",
u"select_multiple" : u"select multiple",
u"list_name" : u"list name"
}
out_dict_array = list()
for row in dict_array:
out_row = dict()
for key in row.keys():
if key in column_header_conversions.keys():
out_row[column_header_conversions[key]] = row[key]
else:
out_row[key] = row[key]
out_dict_array.append(out_row)
return out_dict_array
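# Illustrative note (added): dealias_headers only renames the known underscore
# aliases; e.g. dealias_headers([{"read_only": "yes", "name": "age"}]) returns
# [{"read only": "yes", "name": "age"}], leaving unrecognized keys untouched.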
class ParseQuestionException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SurveyReader(SpreadsheetReader):
def __init__(self, path):
super(SurveyReader, self).__init__(path)
self._setup_survey()
def _setup_survey(self):
"""
Does some parsing on the survey sheet.
I think this should probably go somewhere else so all the parsing happens in the same place.
"""
self._process_questions()
self._construct_choice_lists()
self._insert_lists()
self._save_settings()
self._organize_sections()
def _save_settings(self):
"""
sets _settings to the settings worksheet.
"""
# the excel reader gives a list of dicts, one dict for each
# row after the headers, we're only going to use the first
# row.
self._settings = self._dict.get(u"settings", [{}])[0]
def _process_questions(self):
"""
A question is a dictionary representing a row in the survey worksheet, where the keys are the column headers.
This function does some light parsing on them, for example it will:
remove disabled questions, set the survey title and id, break apart select and group statements.
"""
new_question_list = list()
self._dict[SURVEY_SHEET] = dealias_headers(self._dict[SURVEY_SHEET])
for question in self._dict[SURVEY_SHEET]:
if u"type" not in question:
continue
#Disabled should probably be first so the attributes below can be disabled.
if u"disabled" in question:
disabled = question[u"disabled"]
if disabled in yes_no_conversions:
disabled = yes_no_conversions[disabled]
if disabled == 'true()':
continue
#TODO: These should be on the settings sheet... I'm not sure if we need to support them being on the survey sheet as well
# Except default lang, I don't know what to do with that.
if question[constants.TYPE] == SET_TITLE:
if " " in question[constants.NAME].strip():
raise PyXFormError("Form title must not include any spaces", question[constants.NAME])
self._title = question[constants.NAME]
continue
if question[constants.TYPE] == SET_ID:
#TODO: Can any name cell ever contain spaces? Move this up if not
if " " in question[constants.NAME].strip():
raise PyXFormError("Form id must not include any spaces", question[constants.NAME])
self._id = question[constants.NAME]
continue
if question[constants.TYPE] == SET_DEFAULT_LANG:
self._def_lang = question[constants.NAME]#We need to hold onto this because it is used when generating itext elements
continue
new_question_list.append(self._process_question_type(question))
self._dict[SURVEY_SHEET] = new_question_list
#Make sure form name and ID are properly set:
if self._id.find(" ") != -1:
raise PyXFormError("Form id must not include any spaces", self._id)
if self._name.find(" ") != -1:
self._name = self._id
def _process_question_type(self, question):
question_type = question[constants.TYPE]
question_type = question_type.strip()
question_type = re.sub(r"\s+", " ", question_type)  # collapse repeated whitespace
try:
return self._prepare_multiple_choice_question(question, question_type)
except ParseQuestionException:
try:
return self._prepare_begin_loop(question, question_type)
except ParseQuestionException as e:
#print e.value #just for debug, maybe this should print to a logfile
#raise PyXFormError("Unsupported syntax: '%s'" % question_type)
return question
def _prepare_multiple_choice_question(self, question, question_type):
"""
Parse a multiple choice question.
Raises ParseQuestionException if the type is not a select statement.
Returns the passed-in question object (mutated in place).
"""
selectCommands = {#Old commands
"select all that apply from" : u"select all that apply",
"select one from" : u"select one",
#New commands
"select_one" : u"select one",
"select_multiple" : u"select all that apply" }
select_regexp = r"^(?P<select_command>(" + '|'.join(selectCommands.keys()) + r")) (?P<list_name>\S+)( (?P<specify_other>or specify other))?$"
select_parse = re.search(select_regexp, question_type)
if select_parse:
parse_dict = select_parse.groupdict()
if parse_dict["select_command"]:
select_type = selectCommands[parse_dict["select_command"]]
list_name = parse_dict["list_name"] #TODO: should check that this is valid at some point
#TODO: specify_other is not in the new spec
specify_other = ("specify_other" in parse_dict and parse_dict["specify_other"]) or (" or specify other" in parse_dict and parse_dict[" or specify other"]) #old version
question[constants.TYPE] = select_type
if specify_other:
question[constants.TYPE] += " or specify other"
question[CHOICES] = list_name
return question
raise ParseQuestionException("")
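# Illustrative note (added): the regex above matches rows such as
# "select one from yes_no" or "select_multiple colors or specify other";
# the first yields roughly {"type": u"select one", "choices": "yes_no"} and
# the second {"type": u"select all that apply or specify other", "choices": "colors"}.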
def _prepare_begin_loop(self, q, question_type):
m = re.search(r"^(?P<type>begin loop) over (?P<list_name>\S+)$", question_type)
if not m:
raise ParseQuestionException("Regex search returned None")
#raise PyXFormError("unsupported loop syntax:" + question_type)
assert COLUMNS not in q
d = m.groupdict()
q[COLUMNS] = d["list_name"]
q[constants.TYPE] = d["type"]
return q
def _construct_choice_lists(self):
"""
Each choice has a list name associated with it. Go through the
list of choices, grouping all the choices by their list name.
"""
if self._lists_sheet_name is None:
return
choice_list = self._dict[self._lists_sheet_name]
choices = {}
for choice in choice_list:
try:
#TODO: decide whether there should be an underscore in list_name so we can get rid of this.
list_name_string_wo_underscore = re.sub(" ", "_", LIST_NAME) if LIST_NAME not in choice else LIST_NAME
list_name = choice.pop(list_name_string_wo_underscore)
if list_name in choices:
choices[list_name].append(choice)
else:
choices[list_name] = [choice]
except KeyError:
raise PyXFormError("For some reason this choice isn't associated with a list.", choice)
self._dict[self._lists_sheet_name] = choices
def _insert_lists(self):
"""
For each multiple choice question and loop in the survey find
the corresponding list and add it to that question.
"""
lists_by_name = self._dict.get(self._lists_sheet_name, {})
for q in self._dict[SURVEY_SHEET]:
self._insert_list(q, CHOICES, lists_by_name)
self._insert_list(q, COLUMNS, lists_by_name)
def _insert_list(self, q, key, lists_by_name):
if key in q:
list_name = q[key]
if list_name not in lists_by_name:
raise PyXFormError("There is no list of %s by this name" % key, list_name)
q[key] = lists_by_name[list_name]
def _organize_sections(self):
"""
This function arranges all the sections into a tree structure
"""
# this needs to happen after columns have been inserted
self._dict = self._dict[SURVEY_SHEET]
result = {u"type": u"survey", u"name": self._name, u"children": []}
result.update(self._settings)
stack = [result]
for cmd in self._dict:
cmd_type = cmd[u"type"]
match_begin = re.match(r"begin (?P<type>group|repeat|loop)", cmd_type)
match_end = re.match(r"end (?P<type>group|repeat|loop)", cmd_type)
# TODO: combine the begin and end patterns below with those above.
# match_begin = re.match(r"begin (?P<type>lgroup|group|looped group|repeat)", cmd_type)
# match_end = re.match(r"end (?P<type>lgroup|group|looped group|repeat)", cmd_type)
if match_begin:
# start a new section
cmd[u"type"] = match_begin.group(1)
if cmd[u"type"] in group_name_conversions:
cmd[u"type"] = group_name_conversions[cmd[u"type"]]
cmd[u"children"] = []
stack[-1][u"children"].append(cmd)
stack.append(cmd)
elif match_end:
match_end = match_end.group(1)
if match_end in group_name_conversions:
match_end = group_name_conversions[match_end]
begin_cmd = stack.pop()
if begin_cmd[u"type"] != match_end:
raise PyXFormError("This end group does not match the previous begin", cmd)
else:
stack[-1][u"children"].append(cmd)
self._dict = result
class QuestionTypesReader(SpreadsheetReader):
"""
Class for reading spreadsheet file that specifies the available question types.
@see question_type_dictionary
"""
def __init__(self, path):
super(QuestionTypesReader, self).__init__(path)
self._setup_question_types_dictionary()
def _setup_question_types_dictionary(self):
self._dict = self._dict[TYPES_SHEET]
self._organize_by_type_name()
def _organize_by_type_name(self):
result = {}
for question_type in self._dict:
result[question_type.pop(u"name")] = question_type
self._dict = result
#Not used internally
class VariableNameReader(SpreadsheetReader):
def __init__(self, path):
SpreadsheetReader.__init__(self, path)
self._organize_renames()
def _organize_renames(self):
new_dict = {}
variable_names_so_far = []
assert u"Dictionary" in self._dict
for d in self._dict[u"Dictionary"]:
if u"Variable Name" in d:
assert d[u"Variable Name"] not in variable_names_so_far, \
d[u"Variable Name"]
variable_names_so_far.append(d[u"Variable Name"])
new_dict[d[u"XPath"]] = d[u"Variable Name"]
else:
variable_names_so_far.append(d[u"XPath"])
self._dict = new_dict
if __name__ == "__main__":
# Open the excel file that is the second argument to this python
# call, convert that file to json and save that json to a file
path = sys.argv[1]
converter = SurveyReader(path)
# converter.print_json_to_file()
print json.dumps(converter.to_json_dict(), ensure_ascii=False, indent=4)
|
|
"""Support for Habitica sensors."""
from collections import namedtuple
from datetime import timedelta
import logging
from aiohttp import ClientResponseError
from homeassistant.const import CONF_NAME, HTTP_TOO_MANY_REQUESTS
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
ST = SensorType = namedtuple("SensorType", ["name", "icon", "unit", "path"])
SENSORS_TYPES = {
"name": ST("Name", None, "", ["profile", "name"]),
"hp": ST("HP", "mdi:heart", "HP", ["stats", "hp"]),
"maxHealth": ST("max HP", "mdi:heart", "HP", ["stats", "maxHealth"]),
"mp": ST("Mana", "mdi:auto-fix", "MP", ["stats", "mp"]),
"maxMP": ST("max Mana", "mdi:auto-fix", "MP", ["stats", "maxMP"]),
"exp": ST("EXP", "mdi:star", "EXP", ["stats", "exp"]),
"toNextLevel": ST("Next Lvl", "mdi:star", "EXP", ["stats", "toNextLevel"]),
"lvl": ST("Lvl", "mdi:arrow-up-bold-circle-outline", "Lvl", ["stats", "lvl"]),
"gp": ST("Gold", "mdi:currency-usd-circle", "Gold", ["stats", "gp"]),
"class": ST("Class", "mdi:sword", "", ["stats", "class"]),
}
TASKS_TYPES = {
"habits": ST("Habits", "mdi:clipboard-list-outline", "n_of_tasks", ["habits"]),
"dailys": ST("Dailys", "mdi:clipboard-list-outline", "n_of_tasks", ["dailys"]),
"todos": ST("TODOs", "mdi:clipboard-list-outline", "n_of_tasks", ["todos"]),
"rewards": ST("Rewards", "mdi:clipboard-list-outline", "n_of_tasks", ["rewards"]),
}
TASKS_MAP_ID = "id"
TASKS_MAP = {
"repeat": "repeat",
"challenge": "challenge",
"group": "group",
"frequency": "frequency",
"every_x": "everyX",
"streak": "streak",
"counter_up": "counterUp",
"counter_down": "counterDown",
"next_due": "nextDue",
"yester_daily": "yesterDaily",
"completed": "completed",
"collapse_checklist": "collapseChecklist",
"type": "type",
"notes": "notes",
"tags": "tags",
"value": "value",
"priority": "priority",
"start_date": "startDate",
"days_of_month": "daysOfMonth",
"weeks_of_month": "weeksOfMonth",
"created_at": "createdAt",
"text": "text",
"is_due": "isDue",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the habitica sensors."""
entities = []
name = config_entry.data[CONF_NAME]
sensor_data = HabitipyData(hass.data[DOMAIN][config_entry.entry_id])
await sensor_data.update()
for sensor_type in SENSORS_TYPES:
entities.append(HabitipySensor(name, sensor_type, sensor_data))
for task_type in TASKS_TYPES:
entities.append(HabitipyTaskSensor(name, task_type, sensor_data))
async_add_entities(entities, True)
class HabitipyData:
"""Habitica API user data cache."""
def __init__(self, api):
"""Habitica API user data cache."""
self.api = api
self.data = None
self.tasks = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def update(self):
"""Get a new fix from Habitica servers."""
try:
self.data = await self.api.user.get()
except ClientResponseError as error:
if error.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(
"Sensor data update for %s has too many API requests."
" Skipping the update.",
DOMAIN,
)
else:
_LOGGER.error(
"Count not update sensor data for %s (%s)",
DOMAIN,
error,
)
for task_type in TASKS_TYPES:
try:
self.tasks[task_type] = await self.api.tasks.user.get(type=task_type)
except ClientResponseError as error:
if error.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(
"Sensor data update for %s has too many API requests."
" Skipping the update.",
DOMAIN,
)
else:
_LOGGER.error(
"Count not update sensor data for %s (%s)",
DOMAIN,
error,
)
class HabitipySensor(Entity):
"""A generic Habitica sensor."""
def __init__(self, name, sensor_name, updater):
"""Initialize a generic Habitica sensor."""
self._name = name
self._sensor_name = sensor_name
self._sensor_type = SENSORS_TYPES[sensor_name]
self._state = None
self._updater = updater
async def async_update(self):
"""Update Condition and Forecast."""
await self._updater.update()
data = self._updater.data
for element in self._sensor_type.path:
data = data[element]
self._state = data
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._sensor_type.icon
@property
def name(self):
"""Return the name of the sensor."""
return f"{DOMAIN}_{self._name}_{self._sensor_name}"
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._sensor_type.unit
class HabitipyTaskSensor(Entity):
"""A Habitica task sensor."""
def __init__(self, name, task_name, updater):
"""Initialize a generic Habitica task."""
self._name = name
self._task_name = task_name
self._task_type = TASKS_TYPES[task_name]
self._state = None
self._updater = updater
async def async_update(self):
"""Update Condition and Forecast."""
await self._updater.update()
all_tasks = self._updater.tasks
for element in self._task_type.path:
tasks_length = len(all_tasks[element])
self._state = tasks_length
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._task_type.icon
@property
def name(self):
"""Return the name of the task."""
return f"{DOMAIN}_{self._name}_{self._task_name}"
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of all user tasks."""
if self._updater.tasks is not None:
all_received_tasks = self._updater.tasks
for element in self._task_type.path:
received_tasks = all_received_tasks[element]
attrs = {}
# Map tasks to TASKS_MAP
for received_task in received_tasks:
task_id = received_task[TASKS_MAP_ID]
task = {}
for map_key, map_value in TASKS_MAP.items():
value = received_task.get(map_value)
if value:
task[map_key] = value
attrs[task_id] = task
return attrs
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._task_type.unit
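# Illustrative sketch (added; not part of the integration): each SensorType
# carries a "path" that is resolved as a nested lookup into the Habitica API
# response, e.g. for the path ["stats", "hp"]:
#
#     value = {"stats": {"hp": 47.5}}
#     for element in ["stats", "hp"]:
#         value = value[element]
#     # value is now 47.5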
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of IAM policy management command for GCS."""
from __future__ import absolute_import
import itertools
import json
from apitools.base.protorpclite import protojson
from apitools.base.protorpclite.messages import DecodeError
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import GetFailureCount
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.iamhelpers import BindingStringToTuple
from gslib.iamhelpers import BindingsTuple
from gslib.iamhelpers import DeserializeBindingsTuple
from gslib.iamhelpers import IsEqualBindings
from gslib.iamhelpers import PatchBindings
from gslib.iamhelpers import SerializeBindingsTuple
from gslib.metrics import LogCommandParams
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.util import GetCloudApiInstance
from gslib.util import NO_MAX
from gslib.util import Retry
_SET_SYNOPSIS = """
gsutil iam set [-afRr] [-e <etag>] file url ...
"""
_GET_SYNOPSIS = """
gsutil iam get url
"""
_CH_SYNOPSIS = """
gsutil iam ch [-fRr] binding ...
where each binding is of the form:
[-d] ("user"|"serviceAccount"|"domain"|"group"):id:role[,...]
[-d] ("allUsers"|"allAuthenticatedUsers"):role[,...]
-d ("user"|"serviceAccount"|"domain"|"group"):id
-d ("allUsers"|"allAuthenticatedUsers")
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "iam get" command gets the IAM policy for a bucket or object, which you
can save and edit for use with the "iam set" command.
For example:
gsutil iam get gs://example > bucket_iam.txt
gsutil iam get gs://example/important.txt > object_iam.txt
The IAM policy returned by "iam get" includes the etag of the IAM policy and
will be used in the precondition check for "iam set", unless the etag is
overridden by setting the "iam set" -e option.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "iam set" command sets the IAM policy for one or more buckets and / or
objects. It overwrites the current IAM policy that exists on a bucket (or
object) with the policy specified in the input file. The "iam set" command
takes as input a file with an IAM policy in the format of the output
generated by "iam get".
The "iam ch" command can be used to edit an existing policy. It works
correctly in the presence of concurrent updates. You may also do this
manually by using the -e flag and overriding the etag returned in "iam get".
Specifying -e with an empty string (i.e. "gsutil iam set -e '' ...") will
instruct gsutil to skip the precondition check when setting the IAM policy.
If you wish to set an IAM policy on a large number of objects, you may want
to use the gsutil -m option for concurrent processing. The following command
will apply iam.txt to all objects in the "cats" bucket.
gsutil -m iam set -r iam.txt gs://cats
Note that only object-level IAM applications are parallelized; you do not
gain any additional performance when applying an IAM policy to a large
number of buckets with the -m flag.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "iam set" recursively to all objects under the
specified bucket.
-a Performs "iam set" request on all object versions.
-e <etag> Performs the precondition check on each object with the
specified etag before setting the policy.
-f Default gsutil error handling is fail-fast. This flag
changes the request to fail-silent mode. This is implicitly
set when invoking the gsutil -m option.
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "iam ch" command incrementally updates IAM policies. You may specify
multiple access grants and removals in a single command invocation, which
will be batched and applied as a whole to each url via an IAM patch.
The patch will be constructed by applying each access grant or removal in the
order in which they appear in the command line arguments. Each access change
specifies a member and the role that will be either granted or revoked.
The gsutil -m option may be set to handle object-level operations more
efficiently.
<B>CH EXAMPLES</B>
Examples for the "ch" sub-command:
To grant a single role to a single member for some targets:
gsutil iam ch user:john.doe@example.com:objectCreator gs://ex-bucket
To make a bucket's objects publicly readable:
gsutil iam ch allUsers:objectViewer gs://ex-bucket
To grant multiple bindings to a bucket:
gsutil iam ch user:john.doe@example.com:objectCreator \\
domain:www.my-domain.org:objectViewer gs://ex-bucket
To specify more than one role for a particular member:
gsutil iam ch user:john.doe@example.com:objectCreator,objectViewer \\
gs://ex-bucket
To apply a grant and simultaneously remove a binding to a bucket:
gsutil iam ch -d group:readers@example.com:legacyBucketReader \\
group:viewers@example.com:objectViewer gs://ex-bucket
To remove a user from all roles on a bucket:
gsutil iam ch -d user:john.doe@example.com gs://ex-bucket
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-R, -r Performs "iam ch" recursively to all objects under the
specified bucket.
-f Default gsutil error handling is fail-fast. This flag
changes the request to fail-silent mode. This is implicitly
set when invoking the gsutil -m option.
"""
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
_CH_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = """
The iam command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION])
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
def _PatchIamWrapper(cls, iter_result, thread_state):
(serialized_bindings_tuples, expansion_result) = iter_result
return cls.PatchIamHelper(
expansion_result.expanded_storage_url,
# Deserialize the JSON object passed from Command.Apply.
[DeserializeBindingsTuple(t) for t in serialized_bindings_tuples],
thread_state=thread_state)
def _SetIamWrapper(cls, iter_result, thread_state):
(serialized_policy, expansion_result) = iter_result
return cls.SetIamHelper(
expansion_result.expanded_storage_url,
# Deserialize the JSON object passed from Command.Apply.
protojson.decode_message(apitools_messages.Policy, serialized_policy),
thread_state=thread_state)
def _SetIamExceptionHandler(cls, e):
cls.logger.error(str(e))
def _PatchIamExceptionHandler(cls, e):
cls.logger.error(str(e))
class IamCommand(Command):
"""Implementation of gsutil iam command."""
command_spec = Command.CreateCommandSpec(
'iam',
min_args=2,
max_args=NO_MAX,
supported_sub_args='afRrd:e:',
file_url_ok=True,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'get': [
CommandArgument.MakeNCloudURLsArgument(1)
],
'set': [
CommandArgument.MakeNFileURLsArgument(1),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
'ch': [
CommandArgument.MakeOneOrMoreBindingsArgument(),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
},
)
help_spec = Command.HelpSpec(
help_name='iam',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary=('Get, set, or change'
' bucket and/or object IAM permissions.'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text, 'set': _set_help_text, 'ch': _ch_help_text,
}
)
def GetIamHelper(self, storage_url, thread_state=None):
"""Gets an IAM policy for a single, resolved bucket / object URL.
Args:
storage_url: A CloudUrl instance with no wildcards, pointing to a
specific bucket or object.
thread_state: CloudApiDelegator instance which is passed from
command.WorkerThread.__init__() if the global -m flag is
specified. Will use self.gsutil_api if thread_state is set
to None.
Returns:
Policy instance.
"""
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
if storage_url.IsBucket():
policy = gsutil_api.GetBucketIamPolicy(
storage_url.bucket_name,
provider=storage_url.scheme,
fields=['bindings', 'etag'],
)
else:
policy = gsutil_api.GetObjectIamPolicy(
storage_url.bucket_name,
storage_url.object_name,
generation=storage_url.generation,
provider=storage_url.scheme,
fields=['bindings', 'etag'],
)
return policy
def _GetIam(self, thread_state=None):
"""Gets IAM policy for single bucket or object."""
pattern = self.args[0]
matches = PluralityCheckableIterator(
self.WildcardIterator(pattern).IterAll(bucket_listing_fields=['name'])
)
if matches.IsEmpty():
raise CommandException('%s matched no URLs' % pattern)
if matches.HasPlurality():
raise CommandException(
'%s matched more than one URL, which is not allowed by the %s '
'command' % (pattern, self.command_name))
storage_url = StorageUrlFromString(list(matches)[0].url_string)
policy = self.GetIamHelper(storage_url, thread_state=thread_state)
print json.dumps(
json.loads(protojson.encode_message(policy)), sort_keys=True, indent=2)
def _SetIamHelperInternal(self, storage_url, policy, thread_state=None):
"""Sets IAM policy for a single, resolved bucket / object URL.
Args:
storage_url: A CloudUrl instance with no wildcards, pointing to a
specific bucket or object.
policy: A Policy object to set on the bucket / object.
thread_state: CloudApiDelegator instance which is passed from
command.WorkerThread.__init__() if the -m flag is
specified. Will use self.gsutil_api if thread_state is set
to None.
Raises:
ServiceException passed from the API call if an HTTP error was returned.
"""
# SetIamHelper may be called by a command.WorkerThread. In the
# single-threaded case, WorkerThread will not pass the CloudApiDelegator
# instance to thread_state. GetCloudInstance is called to resolve the
# edge case.
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
if storage_url.IsBucket():
gsutil_api.SetBucketIamPolicy(
storage_url.bucket_name, policy, provider=storage_url.scheme)
else:
gsutil_api.SetObjectIamPolicy(
storage_url.bucket_name, storage_url.object_name, policy,
generation=storage_url.generation, provider=storage_url.scheme)
def SetIamHelper(self, storage_url, policy, thread_state=None):
"""Handles the potential exception raised by the internal set function."""
try:
self._SetIamHelperInternal(
storage_url, policy, thread_state=thread_state)
except ServiceException:
if self.continue_on_error:
self.everything_set_okay = False
else:
raise
def PatchIamHelper(
self, storage_url, bindings_tuples, thread_state=None):
"""Patches an IAM policy for a single, resolved bucket / object URL.
The patch is applied by altering the policy from an IAM get request, and
setting the new IAM with the specified etag. Because concurrent IAM set
requests may alter the etag, we may need to retry this operation several
times before success.
Args:
storage_url: A CloudUrl instance with no wildcards, pointing to a
specific bucket or object.
bindings_tuples: A list of BindingsTuple instances.
thread_state: CloudApiDelegator instance which is passed from
command.WorkerThread.__init__() if the -m flag is
specified. Will use self.gsutil_api if thread_state is set
to None.
"""
try:
self._PatchIamHelperInternal(
storage_url, bindings_tuples, thread_state=thread_state)
except ServiceException:
if self.continue_on_error:
self.everything_set_okay = False
else:
raise
@Retry(PreconditionException, tries=3, timeout_secs=1.0)
def _PatchIamHelperInternal(
self, storage_url, bindings_tuples, thread_state=None):
policy = self.GetIamHelper(storage_url, thread_state=thread_state)
(etag, bindings) = (policy.etag, policy.bindings)
# Create a backup which is untainted by any references to the original
# bindings.
orig_bindings = list(bindings)
for (is_grant, diff) in bindings_tuples:
bindings = PatchBindings(bindings, BindingsTuple(is_grant, diff))
if IsEqualBindings(bindings, orig_bindings):
self.logger.info('No changes made to %s', storage_url)
return
policy = apitools_messages.Policy(bindings=bindings, etag=etag)
# We explicitly wish for etag mismatches to raise an error and allow this
# function to error out, so we are bypassing the exception handling offered
# by IamCommand.SetIamHelper in lieu of our own handling (@Retry).
self._SetIamHelperInternal(
storage_url, policy, thread_state=thread_state)
def _PatchIam(self):
self.continue_on_error = False
self.recursion_requested = False
patch_bindings_tuples = []
if self.sub_opts:
for o, a in self.sub_opts:
if o in ['-r', '-R']:
self.recursion_requested = True
elif o == '-f':
self.continue_on_error = True
elif o == '-d':
patch_bindings_tuples.append(BindingStringToTuple(False, a))
patterns = []
# N.B.: self.sub_opts stops taking in options at the first non-flagged
# token. The rest of the tokens are sent to self.args. Thus, in order to
# handle input of the form "-d <binding> <binding> <url>", we will have to
# parse self.args for a mix of both bindings and CloudUrls. We are not
# expecting to come across the -r, -f flags here.
it = iter(self.args)
for token in it:
if token == '-d':
patch_bindings_tuples.append(
BindingStringToTuple(False, it.next()))
else:
try:
patch_bindings_tuples.append(
BindingStringToTuple(True, token)
)
# All following arguments are urls.
except (ArgumentException, CommandException):
patterns.append(token)
for token in it:
patterns.append(token)
# We must have some bindings to process, else this is pointless.
if not patch_bindings_tuples:
raise CommandException('Must specify at least one binding.')
self.everything_set_okay = True
threaded_wildcards = []
for pattern in patterns:
surl = StorageUrlFromString(pattern)
try:
if surl.IsBucket():
if self.recursion_requested:
surl.object_name = '*'
threaded_wildcards.append(surl.url_string)
else:
self.PatchIamHelper(surl, patch_bindings_tuples)
else:
threaded_wildcards.append(surl.url_string)
except AttributeError:
error_msg = 'Invalid Cloud URL "%s".' % surl.object_name
if set(surl.object_name).issubset(set('-Rrf')):
error_msg += (
' This resource handle looks like a flag, which must appear '
'before all bindings. See "gsutil help iam ch" for more details.'
)
raise CommandException(error_msg)
if threaded_wildcards:
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug,
self.logger, self.gsutil_api,
threaded_wildcards, self.recursion_requested,
all_versions=self.all_versions,
continue_on_error=self.continue_on_error or self.parallel_operations,
bucket_listing_fields=['name'])
seek_ahead_iterator = SeekAheadNameExpansionIterator(
self.command_name, self.debug, self.GetSeekAheadGsutilApi(),
threaded_wildcards, self.recursion_requested,
all_versions=self.all_versions)
serialized_bindings_tuples_it = itertools.repeat(
[SerializeBindingsTuple(t) for t in patch_bindings_tuples])
self.Apply(
_PatchIamWrapper,
itertools.izip(
serialized_bindings_tuples_it, name_expansion_iterator),
_PatchIamExceptionHandler,
fail_on_error=not self.continue_on_error,
seek_ahead_iterator=seek_ahead_iterator)
self.everything_set_okay &= not GetFailureCount() > 0
# TODO: Add an error counter for files and objects.
if not self.everything_set_okay:
raise CommandException('Some IAM policies could not be patched.')
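# A hedged sketch of the "ch" binding syntax this parser expects; the member,
# role, and bucket names below are illustrative placeholders, not values taken
# from this module:
#
#   gsutil iam ch user:jane@example.com:objectViewer gs://example-bucket
#   gsutil iam ch -d group:admins@example.com:objectAdmin gs://example-bucket
#
# Each token is first tried as a binding via BindingStringToTuple; once a token
# fails to parse as a binding, it and every token after it are treated as URLs.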
# TODO(iam-beta): Add an optional flag to specify etag and edit the policy
# accordingly to be passed into the helper functions.
def _SetIam(self):
"""Set IAM policy for given wildcards on the command line."""
self.continue_on_error = False
self.recursion_requested = False
self.all_versions = False
force_etag = False
etag = ''
if self.sub_opts:
for o, arg in self.sub_opts:
if o in ['-r', '-R']:
self.recursion_requested = True
elif o == '-f':
self.continue_on_error = True
elif o == '-a':
self.all_versions = True
elif o == '-e':
etag = str(arg)
force_etag = True
else:
self.RaiseInvalidArgumentException()
file_url = self.args[0]
patterns = self.args[1:]
# Load the IAM policy file and raise error if the file is invalid JSON or
# does not exist.
try:
with open(file_url, 'r') as fp:
policy = json.loads(fp.read())
except IOError:
raise ArgumentException(
'Specified IAM policy file "%s" does not exist.' % file_url)
except ValueError:
raise ArgumentException(
'Invalid IAM policy file "%s".' % file_url)
bindings = policy.get('bindings', [])
if not force_etag:
etag = policy.get('etag', '')
policy_json = json.dumps({'bindings': bindings, 'etag': etag})
try:
policy = protojson.decode_message(apitools_messages.Policy, policy_json)
except DecodeError:
raise ArgumentException(
'Invalid IAM policy file "%s" or etag "%s".' % (file_url, etag))
self.everything_set_okay = True
# This list of wildcard strings will be handled by NameExpansionIterator.
threaded_wildcards = []
for pattern in patterns:
surl = StorageUrlFromString(pattern)
if surl.IsBucket():
if self.recursion_requested:
surl.object_name = '*'
threaded_wildcards.append(surl.url_string)
else:
self.SetIamHelper(surl, policy)
else:
threaded_wildcards.append(surl.url_string)
# N.B.: If threaded_wildcards contains a non-existent bucket
# (e.g. ["gs://non-existent", "gs://existent"]), NameExpansionIterator
# will raise an exception in iter.next. This halts all iteration, even
# when -f is set. This behavior is also evident in acl set. This behavior
# also appears for any exception that will be raised when iterating over
# wildcard expansions (access denied if bucket cannot be listed, etc.).
if threaded_wildcards:
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug,
self.logger, self.gsutil_api,
threaded_wildcards, self.recursion_requested,
all_versions=self.all_versions,
continue_on_error=self.continue_on_error or self.parallel_operations,
bucket_listing_fields=['name'])
seek_ahead_iterator = SeekAheadNameExpansionIterator(
self.command_name, self.debug, self.GetSeekAheadGsutilApi(),
threaded_wildcards, self.recursion_requested,
all_versions=self.all_versions)
policy_it = itertools.repeat(protojson.encode_message(policy))
self.Apply(
_SetIamWrapper,
itertools.izip(
policy_it, name_expansion_iterator),
_SetIamExceptionHandler,
fail_on_error=not self.continue_on_error,
seek_ahead_iterator=seek_ahead_iterator)
self.everything_set_okay &= not GetFailureCount() > 0
# TODO: Add an error counter for files and objects.
if not self.everything_set_okay:
raise CommandException('Some IAM policies could not be set.')
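# A hedged sketch of the JSON policy file that "iam set" consumes (it reads the
# 'bindings' and 'etag' keys above); the role, member, and etag values are
# illustrative placeholders:
#
#   {
#     "bindings": [
#       {
#         "role": "roles/storage.objectViewer",
#         "members": ["user:jane@example.com"]
#       }
#     ],
#     "etag": "CAE="
#   }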
def RunCommand(self):
"""Command entry point for the acl command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
LogCommandParams(sub_opts=self.sub_opts)
self.def_acl = False
if action_subcommand == 'get':
LogCommandParams(subcommands=[action_subcommand])
self._GetIam()
elif action_subcommand == 'set':
LogCommandParams(subcommands=[action_subcommand])
self._SetIam()
elif action_subcommand == 'ch':
LogCommandParams(subcommands=[action_subcommand])
self._PatchIam()
else:
raise CommandException(
'Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help iam".' % (action_subcommand, self.command_name))
return 0
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit testing base class for Port implementations."""
import errno
import logging
import os
import socket
import sys
import time
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
from webkitpy.layout_tests.servers import http_server_base
from webkitpy.tool.mocktool import MockOptions
# FIXME: get rid of this fixture
class TestWebKitPort(Port):
port_name = "testwebkitport"
def __init__(self, port_name=None, symbols_string=None,
expectations_file=None, skips_file=None, host=None, config=None,
**kwargs):
port_name = port_name or TestWebKitPort.port_name
self.symbols_string = symbols_string # Passing "" disables all statically-detectable features.
host = host or MockSystemHost()
super(TestWebKitPort, self).__init__(host, port_name=port_name, **kwargs)
def all_test_configurations(self):
return [self.test_configuration()]
def _symbols_string(self):
return self.symbols_string
def _tests_for_disabled_features(self):
return ["accessibility", ]
class PortTestCase(unittest.TestCase):
"""Tests that all Port implementations must pass."""
HTTP_PORTS = (8000, 8080, 8443)
WEBSOCKET_PORTS = (8880,)
# Subclasses override this to point to their Port subclass.
os_name = None
os_version = None
port_maker = TestWebKitPort
port_name = None
def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
host = host or MockSystemHost(os_name=(os_name or self.os_name), os_version=(os_version or self.os_version))
options = options or MockOptions(configuration='Release')
port_name = port_name or self.port_name
port_name = self.port_maker.determine_full_port_name(host, options, port_name)
port = self.port_maker(host, port_name, options=options, **kwargs)
port._config.build_directory = lambda configuration: '/mock-build'
return port
def make_wdiff_available(self, port):
port._wdiff_available = True
def test_default_max_locked_shards(self):
port = self.make_port()
port.default_child_processes = lambda: 16
self.assertEqual(port.default_max_locked_shards(), 1)
port.default_child_processes = lambda: 2
self.assertEqual(port.default_max_locked_shards(), 1)
def test_default_timeout_ms(self):
self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 36000)
def test_default_pixel_tests(self):
self.assertEqual(self.make_port().default_pixel_tests(), True)
def test_driver_cmd_line(self):
port = self.make_port()
self.assertTrue(len(port.driver_cmd_line()))
options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
port = self.make_port(options=options)
cmd_line = port.driver_cmd_line()
self.assertTrue('--foo=bar' in cmd_line)
self.assertTrue('--foo=baz' in cmd_line)
def test_uses_apache(self):
self.assertTrue(self.make_port()._uses_apache())
def assert_servers_are_down(self, host, ports):
for port in ports:
try:
test_socket = socket.socket()
test_socket.connect((host, port))
self.fail()
except IOError, e:
self.assertTrue(e.errno in (errno.ECONNREFUSED, errno.ECONNRESET))
finally:
test_socket.close()
def assert_servers_are_up(self, host, ports):
for port in ports:
try:
test_socket = socket.socket()
test_socket.connect((host, port))
except IOError, e:
self.fail('failed to connect to %s:%d' % (host, port))
finally:
test_socket.close()
def test_diff_image__missing_both(self):
port = self.make_port()
self.assertEqual(port.diff_image(None, None), (None, None))
self.assertEqual(port.diff_image(None, ''), (None, None))
self.assertEqual(port.diff_image('', None), (None, None))
self.assertEqual(port.diff_image('', ''), (None, None))
def test_diff_image__missing_actual(self):
port = self.make_port()
self.assertEqual(port.diff_image(None, 'foo'), ('foo', None))
self.assertEqual(port.diff_image('', 'foo'), ('foo', None))
def test_diff_image__missing_expected(self):
port = self.make_port()
self.assertEqual(port.diff_image('foo', None), ('foo', None))
self.assertEqual(port.diff_image('foo', ''), ('foo', None))
def test_diff_image(self):
port = self.make_port()
self.proc = None
def make_proc(port, nm, cmd, env):
self.proc = MockServerProcess(port, nm, cmd, env, lines=['diff: 100% failed\n', 'diff: 100% failed\n'])
return self.proc
port._server_process_constructor = make_proc
port.setup_test_run()
self.assertEqual(port.diff_image('foo', 'bar'), ('', 100.0, None))
port.clean_up_test_run()
self.assertTrue(self.proc.stopped)
self.assertEqual(port._image_differ, None)
def test_check_wdiff(self):
port = self.make_port()
port.check_wdiff()
def test_wdiff_text_fails(self):
host = MockSystemHost(os_name=self.os_name, os_version=self.os_version)
host.executive = MockExecutive(should_throw=True)
port = self.make_port(host=host)
port._executive = host.executive # AndroidPortTest.make_port sets its own executive, so reset that as well.
# This should raise a ScriptError that gets caught and turned into the
# error text, and also mark wdiff as not available.
self.make_wdiff_available(port)
self.assertTrue(port.wdiff_available())
diff_txt = port.wdiff_text("/tmp/foo.html", "/tmp/bar.html")
self.assertEqual(diff_txt, port._wdiff_error_html)
self.assertFalse(port.wdiff_available())
def test_test_configuration(self):
port = self.make_port()
self.assertTrue(port.test_configuration())
def test_all_test_configurations(self):
port = self.make_port()
self.assertTrue(len(port.all_test_configurations()) > 0)
self.assertTrue(port.test_configuration() in port.all_test_configurations(), "%s not in %s" % (port.test_configuration(), port.all_test_configurations()))
def test_get_crash_log(self):
port = self.make_port()
self.assertEqual(port._get_crash_log(None, None, None, None, newer_than=None),
(None,
'crash log for <unknown process name> (pid <unknown>):\n'
'STDOUT: <empty>\n'
'STDERR: <empty>\n'))
self.assertEqual(port._get_crash_log('foo', 1234, 'out bar\nout baz', 'err bar\nerr baz\n', newer_than=None),
('err bar\nerr baz\n',
'crash log for foo (pid 1234):\n'
'STDOUT: out bar\n'
'STDOUT: out baz\n'
'STDERR: err bar\n'
'STDERR: err baz\n'))
self.assertEqual(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None),
('foo\xa6bar',
u'crash log for foo (pid 1234):\n'
u'STDOUT: foo\ufffdbar\n'
u'STDERR: foo\ufffdbar\n'))
self.assertEqual(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0),
('foo\xa6bar',
u'crash log for foo (pid 1234):\n'
u'STDOUT: foo\ufffdbar\n'
u'STDERR: foo\ufffdbar\n'))
def assert_build_path(self, options, dirs, expected_path):
port = self.make_port(options=options)
for directory in dirs:
port.host.filesystem.maybe_make_directory(directory)
self.assertEqual(port._build_path(), expected_path)
def test_expectations_ordering(self):
port = self.make_port()
for path in port.expectations_files():
port._filesystem.write_text_file(path, '')
ordered_dict = port.expectations_dict()
self.assertEqual(port.path_to_generic_test_expectations_file(), ordered_dict.keys()[0])
options = MockOptions(additional_expectations=['/tmp/foo', '/tmp/bar'])
port = self.make_port(options=options)
for path in port.expectations_files():
port._filesystem.write_text_file(path, '')
port._filesystem.write_text_file('/tmp/foo', 'foo')
port._filesystem.write_text_file('/tmp/bar', 'bar')
ordered_dict = port.expectations_dict()
self.assertEqual(ordered_dict.keys()[-2:], options.additional_expectations) # pylint: disable=E1101
self.assertEqual(ordered_dict.values()[-2:], ['foo', 'bar'])
def test_skipped_directories_for_symbols(self):
# This first test confirms that the commonly found symbols result in the expected skipped directories.
symbols_string = " ".join(["fooSymbol"])
expected_directories = set([
"webaudio/codec-tests/mp3",
"webaudio/codec-tests/aac",
])
result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
self.assertEqual(result_directories, expected_directories)
# Test that the nm string parsing actually works:
symbols_string = """
000000000124f498 s __ZZN7WebCore13ff_mp3_decoder12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
000000000124f500 s __ZZN7WebCore13ff_mp3_decoder13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
000000000124f670 s __ZZN7WebCore13ff_mp3_decoder13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
"""
# Note 'webaudio/codec-tests/mp3' is not in the list of skipped directories (hence the parsing of the ff_mp3_decoder symbols worked):
expected_directories = set([
"webaudio/codec-tests/aac",
])
result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
self.assertEqual(result_directories, expected_directories)
def test_expectations_files(self):
port = TestWebKitPort()
def platform_dirs(port):
return [port.host.filesystem.basename(port.host.filesystem.dirname(f)) for f in port.expectations_files()]
self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport'])
port = TestWebKitPort(port_name="testwebkitport-version")
self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport', 'testwebkitport-version'])
port = TestWebKitPort(port_name="testwebkitport-version",
options=MockOptions(additional_platform_directory=["internal-testwebkitport"]))
self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport', 'testwebkitport-version', 'internal-testwebkitport'])
def test_test_expectations(self):
# Check that we read the expectations file
host = MockSystemHost()
host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations',
'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL\n')
port = TestWebKitPort(host=host)
self.assertEqual(''.join(port.expectations_dict().values()), 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL\n')
def _assert_config_file_for_platform(self, port, platform, config_file):
self.assertEqual(port._apache_config_file_name_for_platform(platform), config_file)
def test_linux_distro_detection(self):
port = TestWebKitPort()
self.assertFalse(port._is_redhat_based())
self.assertFalse(port._is_debian_based())
port._filesystem = MockFileSystem({'/etc/redhat-release': ''})
self.assertTrue(port._is_redhat_based())
self.assertFalse(port._is_debian_based())
port._filesystem = MockFileSystem({'/etc/debian_version': ''})
self.assertFalse(port._is_redhat_based())
self.assertTrue(port._is_debian_based())
def test_apache_config_file_name_for_platform(self):
port = TestWebKitPort()
self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
self._assert_config_file_for_platform(port, 'linux2', 'apache2-httpd.conf')
self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
port._is_redhat_based = lambda: True
port._apache_version = lambda: '2.2'
self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd-2.2.conf')
port = TestWebKitPort()
port._is_debian_based = lambda: True
self._assert_config_file_for_platform(port, 'linux2', 'apache2-debian-httpd.conf')
self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf') # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd.conf')
def test_path_to_apache_config_file(self):
port = TestWebKitPort()
saved_environ = os.environ.copy()
try:
os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/path/to/httpd.conf'
self.assertRaises(IOError, port._path_to_apache_config_file)
port._filesystem.write_text_file('/existing/httpd.conf', 'Hello, world!')
os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
self.assertEqual(port._path_to_apache_config_file(), '/existing/httpd.conf')
finally:
os.environ = saved_environ.copy()
# Mock out _apache_config_file_name_for_platform to ignore the passed sys.platform value.
port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
self.assertEqual(port._path_to_apache_config_file(), '/mock-checkout/LayoutTests/http/conf/httpd.conf')
# Check that even if we mock out _apache_config_file_name, the environment variable takes precedence.
saved_environ = os.environ.copy()
try:
os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
self.assertEqual(port._path_to_apache_config_file(), '/existing/httpd.conf')
finally:
os.environ = saved_environ.copy()
def test_additional_platform_directory(self):
port = self.make_port(options=MockOptions(additional_platform_directory=['/tmp/foo']))
self.assertEqual(port.baseline_search_path()[0], '/tmp/foo')
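# A hedged sketch of how a concrete port plugs into PortTestCase; the class and
# version names are illustrative placeholders, not defined in this module:
#
#   class LinuxPortTest(PortTestCase):
#       os_name = 'linux'
#       os_version = 'lucid'
#       port_name = 'linux'
#       port_maker = LinuxPort  # placeholder: the concrete Port subclass under test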
|
|
"""Support for Google Calendar event device sensors."""
from __future__ import annotations
from datetime import timedelta
from http import HTTPStatus
import logging
import re
from typing import cast, final
from aiohttp import web
from homeassistant.components import http
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
time_period_str,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "calendar"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for calendars."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
hass.http.register_view(CalendarListView(component))
hass.http.register_view(CalendarEventView(component))
hass.components.frontend.async_register_built_in_panel(
"calendar", "calendar", "hass:calendar"
)
await component.async_setup(config)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
def get_date(date):
"""Get the dateTime from date or dateTime as a local."""
if "date" in date:
return dt.start_of_local_day(
dt.dt.datetime.combine(dt.parse_date(date["date"]), dt.dt.time.min)
)
return dt.as_local(dt.parse_datetime(date["dateTime"]))
def normalize_event(event):
"""Normalize a calendar event."""
normalized_event = {}
start = event.get("start")
end = event.get("end")
start = get_date(start) if start is not None else None
end = get_date(end) if end is not None else None
normalized_event["dt_start"] = start
normalized_event["dt_end"] = end
start = start.strftime(DATE_STR_FORMAT) if start is not None else None
end = end.strftime(DATE_STR_FORMAT) if end is not None else None
normalized_event["start"] = start
normalized_event["end"] = end
# cleanup the string so we don't have a bunch of double+ spaces
summary = event.get("summary", "")
normalized_event["message"] = re.sub(" +", "", summary).strip()
normalized_event["location"] = event.get("location", "")
normalized_event["description"] = event.get("description", "")
normalized_event["all_day"] = "date" in event["start"]
return normalized_event
def calculate_offset(event, offset):
"""Calculate event offset.
Return the updated event with the offset_time included.
"""
summary = event.get("summary", "")
# check if we have an offset tag in the message
# time is HH:MM or MM
reg = f"{offset}([+-]?[0-9]{{0,2}}(:[0-9]{{0,2}})?)"
search = re.search(reg, summary)
if search and search.group(1):
time = search.group(1)
if ":" not in time:
if time[0] == "+" or time[0] == "-":
time = f"{time[0]}0:{time[1:]}"
else:
time = f"0:{time}"
offset_time = time_period_str(time)
summary = (summary[: search.start()] + summary[search.end() :]).strip()
event["summary"] = summary
else:
offset_time = dt.dt.timedelta() # default it
event["offset_time"] = offset_time
return event
def is_offset_reached(event):
"""Have we reached the offset time specified in the event title."""
start = get_date(event["start"])
if start is None or event["offset_time"] == dt.dt.timedelta():
return False
return start + event["offset_time"] <= dt.now(start.tzinfo)
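# A hedged sketch of the offset handling above; the "!!" prefix is an assumed
# value supplied by the calling platform, not defined in this module:
#
#   event = {"summary": "Dentist !!-15",
#            "start": {"dateTime": "2021-06-01T10:00:00+00:00"}}
#   event = calculate_offset(event, "!!")
#   # event["summary"] == "Dentist"
#   # event["offset_time"] is a negative 15 minute timedelta, so
#   # is_offset_reached(event) becomes True 15 minutes before the start.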
class CalendarEventDevice(Entity):
"""Base class for calendar event entities."""
@property
def event(self):
"""Return the next upcoming event."""
raise NotImplementedError()
@final
@property
def state_attributes(self):
"""Return the entity state attributes."""
if (event := self.event) is None:
return None
event = normalize_event(event)
return {
"message": event["message"],
"all_day": event["all_day"],
"start_time": event["start"],
"end_time": event["end"],
"location": event["location"],
"description": event["description"],
}
@property
def state(self):
"""Return the state of the calendar event."""
if (event := self.event) is None:
return STATE_OFF
event = normalize_event(event)
start = event["dt_start"]
end = event["dt_end"]
if start is None or end is None:
return STATE_OFF
now = dt.now()
if start <= now < end:
return STATE_ON
return STATE_OFF
async def async_get_events(self, hass, start_date, end_date):
"""Return calendar events within a datetime range."""
raise NotImplementedError()
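# A hedged, minimal sketch of a platform entity built on CalendarEventDevice;
# the class name and event payload are illustrative assumptions:
#
#   class DemoCalendar(CalendarEventDevice):
#       def __init__(self, upcoming_event):
#           # upcoming_event: dict with "summary" plus "start"/"end" entries,
#           # each holding {"dateTime": ...} or {"date": ...}.
#           self._event = upcoming_event
#
#       @property
#       def event(self):
#           return self._event
#
#       async def async_get_events(self, hass, start_date, end_date):
#           return [self._event]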
class CalendarEventView(http.HomeAssistantView):
"""View to retrieve calendar content."""
url = "/api/calendars/{entity_id}"
name = "api:calendars:calendar"
def __init__(self, component: EntityComponent) -> None:
"""Initialize calendar view."""
self.component = component
async def get(self, request, entity_id):
"""Return calendar events."""
entity = self.component.get_entity(entity_id)
start = request.query.get("start")
end = request.query.get("end")
if None in (start, end, entity):
return web.Response(status=HTTPStatus.BAD_REQUEST)
try:
start_date = dt.parse_datetime(start)
end_date = dt.parse_datetime(end)
except (ValueError, AttributeError):
return web.Response(status=HTTPStatus.BAD_REQUEST)
event_list = await entity.async_get_events(
request.app["hass"], start_date, end_date
)
return self.json(event_list)
class CalendarListView(http.HomeAssistantView):
"""View to retrieve calendar list."""
url = "/api/calendars"
name = "api:calendars"
def __init__(self, component: EntityComponent) -> None:
"""Initialize calendar view."""
self.component = component
async def get(self, request: web.Request) -> web.Response:
"""Retrieve calendar list."""
hass = request.app["hass"]
calendar_list: list[dict[str, str]] = []
for entity in self.component.entities:
state = hass.states.get(entity.entity_id)
calendar_list.append({"name": state.name, "entity_id": entity.entity_id})
return self.json(sorted(calendar_list, key=lambda x: cast(str, x["name"])))
|
|
# Copyright (c) 2003-2014 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: rrp.py 1105 2014-01-19 13:56:06Z bethus@gmail.com $
#
# Author: Alberto Solino
#
# Description:
# [MS-RRP] Interface implementation
#
from impacket.dcerpc.v5 import ndr
from impacket.dcerpc.v5.ndr import NDRCALL, NDR, NDRSTRUCT, NDRPOINTER, NDRUniConformantVaryingArray, NDRUniConformantArray
from struct import unpack
from impacket.dcerpc.v5.dtypes import *
from impacket import system_errors
from impacket.uuid import uuidtup_to_bin
from impacket.dcerpc.v5.enum import Enum
MSRPC_UUID_RRP = uuidtup_to_bin(('338CD001-2244-31F1-AAAA-900038001003', '1.0'))
class DCERPCSessionError(Exception):
def __init__( self, packet = None, error_code = None):
Exception.__init__(self)
self.packet = packet
if packet is not None:
self.error_code = packet['ErrorCode']
else:
self.error_code = error_code
def get_error_code( self ):
return self.error_code
def get_packet( self ):
return self.packet
def __str__( self ):
key = self.error_code
if (system_errors.ERROR_MESSAGES.has_key(key)):
error_msg_short = system_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = system_errors.ERROR_MESSAGES[key][1]
return 'RRP SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'RRP SessionError: unknown error code: 0x%x' % (self.error_code)
################################################################################
# CONSTANTS
################################################################################
# 2.2.2 PREGISTRY_SERVER_NAME
PREGISTRY_SERVER_NAME = PWCHAR
# 2.2.3 error_status_t
error_status_t = ULONG
# 2.2.5 RRP_UNICODE_STRING
RRP_UNICODE_STRING = RPC_UNICODE_STRING
PRRP_UNICODE_STRING = PRPC_UNICODE_STRING
# 2.2.4 REGSAM
REGSAM = ULONG
KEY_QUERY_VALUE = 0x00000001
KEY_SET_VALUE = 0x00000002
KEY_CREATE_SUB_KEY = 0x00000004
KEY_ENUMERATE_SUB_KEYS = 0x00000008
KEY_CREATE_LINK = 0x00000020
KEY_WOW64_64KEY = 0x00000100
KEY_WOW64_32KEY = 0x00000200
REG_BINARY = 3
REG_DWORD = 4
REG_DWORD_LITTLE_ENDIAN = 4
REG_DWORD_BIG_ENDIAN = 5
REG_EXPAND_SZ = 2
REG_LINK = 6
REG_MULTI_SZ = 7
REG_NONE = 0
REG_QWORD = 11
REG_QWORD_LITTLE_ENDIAN = 11
REG_SZ = 1
# 3.1.5.7 BaseRegCreateKey (Opnum 6)
REG_CREATED_NEW_KEY = 0x00000001
REG_OPENED_EXISTING_KEY = 0x00000002
# 3.1.5.19 BaseRegRestoreKey (Opnum 19)
# Flags
REG_WHOLE_HIVE_VOLATILE = 0x00000001
REG_REFRESH_HIVE = 0x00000002
REG_NO_LAZY_FLUSH = 0x00000004
REG_FORCE_RESTORE = 0x00000008
################################################################################
# STRUCTURES
################################################################################
# 2.2.1 RPC_HKEY
class RPC_HKEY(NDRSTRUCT):
structure = (
('context_handle_attributes',ULONG),
('context_handle_uuid',UUID),
)
def __init__(self, data = None,isNDR64 = False):
NDRSTRUCT.__init__(self, data, isNDR64)
self['context_handle_uuid'] = '\x00'*20
# 2.2.6 RVALENT
class RVALENT(NDRSTRUCT):
structure = (
('ve_valuename',PRRP_UNICODE_STRING),
('ve_valuelen',DWORD),
('ve_valueptr',DWORD),
('ve_type',DWORD),
)
class RVALENT_ARRAY(NDRUniConformantVaryingArray):
item = RVALENT
# 2.2.9 RPC_SECURITY_DESCRIPTOR
class BYTE_ARRAY(NDRUniConformantVaryingArray):
pass
class PBYTE_ARRAY(NDRPOINTER):
referent = (
('Data', BYTE_ARRAY),
)
class RPC_SECURITY_DESCRIPTOR(NDRSTRUCT):
structure = (
('lpSecurityDescriptor',PBYTE_ARRAY),
('cbInSecurityDescriptor',DWORD),
('cbOutSecurityDescriptor',DWORD),
)
# 2.2.8 RPC_SECURITY_ATTRIBUTES
class RPC_SECURITY_ATTRIBUTES(NDRSTRUCT):
structure = (
('nLength',DWORD),
('RpcSecurityDescriptor',RPC_SECURITY_DESCRIPTOR),
('bInheritHandle',BOOLEAN),
)
class PRPC_SECURITY_ATTRIBUTES(NDRPOINTER):
referent = (
('Data', RPC_SECURITY_ATTRIBUTES),
)
################################################################################
# RPC CALLS
################################################################################
# 3.1.5.1 OpenClassesRoot (Opnum 0)
class OpenClassesRoot(NDRCALL):
opnum = 0
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenClassesRootResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.2 OpenCurrentUser (Opnum 1)
class OpenCurrentUser(NDRCALL):
opnum = 1
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenCurrentUserResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.3 OpenLocalMachine (Opnum 2)
class OpenLocalMachine(NDRCALL):
opnum = 2
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenLocalMachineResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.4 OpenPerformanceData (Opnum 3)
class OpenPerformanceData(NDRCALL):
opnum = 3
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenPerformanceDataResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.5 OpenUsers (Opnum 4)
class OpenUsers(NDRCALL):
opnum = 4
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenUsersResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.6 BaseRegCloseKey (Opnum 5)
class BaseRegCloseKey(NDRCALL):
opnum = 5
structure = (
('hKey', RPC_HKEY),
)
class BaseRegCloseKeyResponse(NDRCALL):
structure = (
('hKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.7 BaseRegCreateKey (Opnum 6)
class BaseRegCreateKey(NDRCALL):
opnum = 6
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('lpClass', RRP_UNICODE_STRING),
('dwOptions', DWORD),
('samDesired', REGSAM),
('lpSecurityAttributes', PRPC_SECURITY_ATTRIBUTES),
('lpdwDisposition', LPULONG),
)
class BaseRegCreateKeyResponse(NDRCALL):
structure = (
('phkResult', RPC_HKEY),
('lpdwDisposition', LPULONG),
('ErrorCode', error_status_t),
)
# 3.1.5.8 BaseRegDeleteKey (Opnum 7)
class BaseRegDeleteKey(NDRCALL):
opnum = 7
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
)
class BaseRegDeleteKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.9 BaseRegDeleteValue (Opnum 8)
class BaseRegDeleteValue(NDRCALL):
opnum = 8
structure = (
('hKey', RPC_HKEY),
('lpValueName', RRP_UNICODE_STRING),
)
class BaseRegDeleteValueResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.10 BaseRegEnumKey (Opnum 9)
class BaseRegEnumKey(NDRCALL):
opnum = 9
structure = (
('hKey', RPC_HKEY),
('dwIndex', DWORD),
('lpNameIn', RRP_UNICODE_STRING),
('lpClassIn', PRRP_UNICODE_STRING),
('lpftLastWriteTime', PFILETIME),
)
class BaseRegEnumKeyResponse(NDRCALL):
structure = (
('lpNameOut', RRP_UNICODE_STRING),
('lplpClassOut', PRRP_UNICODE_STRING),
('lpftLastWriteTime', PFILETIME),
('ErrorCode', error_status_t),
)
# 3.1.5.11 BaseRegEnumValue (Opnum 10)
class BaseRegEnumValue(NDRCALL):
opnum = 10
structure = (
('hKey', RPC_HKEY),
('dwIndex', DWORD),
('lpValueNameIn', RRP_UNICODE_STRING),
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
)
class BaseRegEnumValueResponse(NDRCALL):
structure = (
('lpValueNameOut', RRP_UNICODE_STRING),
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
('ErrorCode', error_status_t),
)
# 3.1.5.12 BaseRegFlushKey (Opnum 11)
class BaseRegFlushKey(NDRCALL):
opnum = 11
structure = (
('hKey', RPC_HKEY),
)
class BaseRegFlushKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.13 BaseRegGetKeySecurity (Opnum 12)
class BaseRegGetKeySecurity(NDRCALL):
opnum = 12
structure = (
('hKey', RPC_HKEY),
('SecurityInformation', SECURITY_INFORMATION),
('pRpcSecurityDescriptorIn', RPC_SECURITY_DESCRIPTOR),
)
class BaseRegGetKeySecurityResponse(NDRCALL):
structure = (
('pRpcSecurityDescriptorOut', RPC_SECURITY_DESCRIPTOR),
('ErrorCode', error_status_t),
)
# 3.1.5.14 BaseRegLoadKey (Opnum 13)
class BaseRegLoadKey(NDRCALL):
opnum = 13
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('lpFile', RRP_UNICODE_STRING),
)
class BaseRegLoadKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.15 BaseRegOpenKey (Opnum 15)
class BaseRegOpenKey(NDRCALL):
opnum = 15
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('dwOptions', DWORD),
('samDesired', REGSAM),
)
class BaseRegOpenKeyResponse(NDRCALL):
structure = (
('phkResult', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.16 BaseRegQueryInfoKey (Opnum 16)
class BaseRegQueryInfoKey(NDRCALL):
opnum = 16
structure = (
('hKey', RPC_HKEY),
('lpClassIn', RRP_UNICODE_STRING),
)
class BaseRegQueryInfoKeyResponse(NDRCALL):
structure = (
('lpClassOut', RPC_UNICODE_STRING),
('lpcSubKeys', DWORD),
('lpcbMaxSubKeyLen', DWORD),
('lpcbMaxClassLen', DWORD),
('lpcValues', DWORD),
('lpcbMaxValueNameLen', DWORD),
('lpcbMaxValueLen', DWORD),
('lpcbSecurityDescriptor', DWORD),
('lpftLastWriteTime', FILETIME),
('ErrorCode', error_status_t),
)
# 3.1.5.17 BaseRegQueryValue (Opnum 17)
class BaseRegQueryValue(NDRCALL):
opnum = 17
structure = (
('hKey', RPC_HKEY),
('lpValueName', RRP_UNICODE_STRING),
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
)
class BaseRegQueryValueResponse(NDRCALL):
structure = (
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
('ErrorCode', error_status_t),
)
# 3.1.5.18 BaseRegReplaceKey (Opnum 18)
class BaseRegReplaceKey(NDRCALL):
opnum = 18
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('lpNewFile', RRP_UNICODE_STRING),
('lpOldFile', RRP_UNICODE_STRING),
)
class BaseRegReplaceKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.19 BaseRegRestoreKey (Opnum 19)
class BaseRegRestoreKey(NDRCALL):
opnum = 19
structure = (
('hKey', RPC_HKEY),
('lpFile', RRP_UNICODE_STRING),
('Flags', DWORD),
)
class BaseRegRestoreKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.20 BaseRegSaveKey (Opnum 20)
class BaseRegSaveKey(NDRCALL):
opnum = 20
structure = (
('hKey', RPC_HKEY),
('lpFile', RRP_UNICODE_STRING),
('pSecurityAttributes', PRPC_SECURITY_ATTRIBUTES),
)
class BaseRegSaveKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.21 BaseRegSetKeySecurity (Opnum 21)
class BaseRegSetKeySecurity(NDRCALL):
opnum = 21
structure = (
('hKey', RPC_HKEY),
('SecurityInformation', SECURITY_INFORMATION),
('pRpcSecurityDescriptor', RPC_SECURITY_DESCRIPTOR),
)
class BaseRegSetKeySecurityResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.22 BaseRegSetValue (Opnum 22)
class BaseRegSetValue(NDRCALL):
opnum = 22
structure = (
('hKey', RPC_HKEY),
('lpValueName', RRP_UNICODE_STRING),
('dwType', DWORD),
('lpData', NDRUniConformantArray),
('cbData', DWORD),
)
class BaseRegSetValueResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.23 BaseRegUnLoadKey (Opnum 23)
class BaseRegUnLoadKey(NDRCALL):
opnum = 23
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
)
class BaseRegUnLoadKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.24 BaseRegGetVersion (Opnum 26)
class BaseRegGetVersion(NDRCALL):
opnum = 26
structure = (
('hKey', RPC_HKEY),
)
class BaseRegGetVersionResponse(NDRCALL):
structure = (
('lpdwVersion', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.5.25 OpenCurrentConfig (Opnum 27)
class OpenCurrentConfig(NDRCALL):
opnum = 27
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenCurrentConfigResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.26 BaseRegQueryMultipleValues (Opnum 29)
class BaseRegQueryMultipleValues(NDRCALL):
opnum = 29
structure = (
('hKey', RPC_HKEY),
('val_listIn', RVALENT_ARRAY),
('num_vals', DWORD),
('lpvalueBuf', PBYTE_ARRAY),
('ldwTotsize', DWORD),
)
class BaseRegQueryMultipleValuesResponse(NDRCALL):
structure = (
('val_listOut', RVALENT_ARRAY),
('lpvalueBuf', PBYTE_ARRAY),
('ldwTotsize', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.5.27 BaseRegSaveKeyEx (Opnum 31)
class BaseRegSaveKeyEx(NDRCALL):
opnum = 31
structure = (
('hKey', RPC_HKEY),
('lpFile', RRP_UNICODE_STRING),
('pSecurityAttributes', PRPC_SECURITY_ATTRIBUTES),
('Flags', DWORD),
)
class BaseRegSaveKeyExResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.28 OpenPerformanceText (Opnum 32)
class OpenPerformanceText(NDRCALL):
opnum = 32
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenPerformanceTextResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.29 OpenPerformanceNlsText (Opnum 33)
class OpenPerformanceNlsText(NDRCALL):
opnum = 33
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenPerformanceNlsTextResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.30 BaseRegQueryMultipleValues2 (Opnum 34)
class BaseRegQueryMultipleValues2(NDRCALL):
opnum = 34
structure = (
('hKey', RPC_HKEY),
('val_listIn', RVALENT_ARRAY),
('num_vals', DWORD),
('lpvalueBuf', PBYTE_ARRAY),
('ldwTotsize', DWORD),
)
class BaseRegQueryMultipleValues2Response(NDRCALL):
structure = (
('val_listOut', RVALENT_ARRAY),
('lpvalueBuf', PBYTE_ARRAY),
('ldwRequiredSize', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.5.31 BaseRegDeleteKeyEx (Opnum 35)
class BaseRegDeleteKeyEx(NDRCALL):
opnum = 35
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('AccessMask', REGSAM),
('Reserved', DWORD),
)
class BaseRegDeleteKeyExResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (OpenClassesRoot, OpenClassesRootResponse),
1 : (OpenCurrentUser, OpenCurrentUserResponse),
2 : (OpenLocalMachine, OpenLocalMachineResponse),
3 : (OpenPerformanceData, OpenPerformanceDataResponse),
4 : (OpenUsers, OpenUsersResponse),
5 : (BaseRegCloseKey, BaseRegCloseKeyResponse),
6 : (BaseRegCreateKey, BaseRegCreateKeyResponse),
7 : (BaseRegDeleteKey, BaseRegDeleteKeyResponse),
8 : (BaseRegDeleteValue, BaseRegDeleteValueResponse),
9 : (BaseRegEnumKey, BaseRegEnumKeyResponse),
10 : (BaseRegEnumValue, BaseRegEnumValueResponse),
11 : (BaseRegFlushKey, BaseRegFlushKeyResponse),
12 : (BaseRegGetKeySecurity, BaseRegGetKeySecurityResponse),
13 : (BaseRegLoadKey, BaseRegLoadKeyResponse),
15 : (BaseRegOpenKey, BaseRegOpenKeyResponse),
16 : (BaseRegQueryInfoKey, BaseRegQueryInfoKeyResponse),
17 : (BaseRegQueryValue, BaseRegQueryValueResponse),
18 : (BaseRegReplaceKey, BaseRegReplaceKeyResponse),
19 : (BaseRegRestoreKey, BaseRegRestoreKeyResponse),
20 : (BaseRegSaveKey, BaseRegSaveKeyResponse),
21 : (BaseRegSetKeySecurity, BaseRegSetKeySecurityResponse),
22 : (BaseRegSetValue, BaseRegSetValueResponse),
23 : (BaseRegUnLoadKey, BaseRegUnLoadKeyResponse),
26 : (BaseRegGetVersion, BaseRegGetVersionResponse),
27 : (OpenCurrentConfig, OpenCurrentConfigResponse),
29 : (BaseRegQueryMultipleValues, BaseRegQueryMultipleValuesResponse),
31 : (BaseRegSaveKeyEx, BaseRegSaveKeyExResponse),
32 : (OpenPerformanceText, OpenPerformanceTextResponse),
33 : (OpenPerformanceNlsText, OpenPerformanceNlsTextResponse),
34 : (BaseRegQueryMultipleValues2, BaseRegQueryMultipleValues2Response),
35 : (BaseRegDeleteKeyEx, BaseRegDeleteKeyExResponse),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def checkNullString(string):
if string == NULL:
return string
if string[-1:] != '\x00':
return string + '\x00'
else:
return string
def unpackValue(valueType, value):
if valueType == REG_DWORD:
retData = unpack('<L', ''.join(value))[0]
elif valueType == REG_DWORD_BIG_ENDIAN:
retData = unpack('>L', ''.join(value))[0]
elif valueType == REG_EXPAND_SZ:
retData = ''.join(value).decode('utf-16le')
elif valueType == REG_MULTI_SZ:
retData = ''.join(value).decode('utf-16le')
elif valueType == REG_QWORD:
retData = unpack('<Q', ''.join(value))[0]
elif valueType == REG_QWORD_LITTLE_ENDIAN:
retData = unpack('<Q', ''.join(value))[0]
elif valueType == REG_SZ:
retData = ''.join(value).decode('utf-16le')
else:
retData = ''.join(value)
return retData
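# A hedged sketch of unpackValue on raw little-endian registry data:
#
#   unpackValue(REG_DWORD, '\x01\x00\x00\x00')       # -> 1
#   unpackValue(REG_SZ, 'f\x00o\x00o\x00\x00\x00')   # -> u'foo\x00' (UTF-16LE)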
def hOpenClassesRoot(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenClassesRoot()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenCurrentUser(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenCurrentUser()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenLocalMachine(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenLocalMachine()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenPerformanceData(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenPerformanceData()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenUsers(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenUsers()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegCloseKey(dce, hKey):
request = BaseRegCloseKey()
request['hKey'] = hKey
return dce.request(request)
def hBaseRegCreateKey(dce, hKey, lpSubKey, lpClass = NULL, dwOptions = 0x00000001, samDesired = MAXIMUM_ALLOWED, lpSecurityAttributes = NULL, lpdwDisposition = REG_CREATED_NEW_KEY):
request = BaseRegCreateKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['lpClass'] = checkNullString(lpClass)
request['dwOptions'] = dwOptions
request['samDesired'] = samDesired
if lpSecurityAttributes == NULL:
request['lpSecurityAttributes']['RpcSecurityDescriptor']['lpSecurityDescriptor'] = NULL
else:
request['lpSecurityAttributes'] = lpSecurityAttributes
request['lpdwDisposition'] = lpdwDisposition
return dce.request(request)
def hBaseRegDeleteKey(dce, hKey, lpSubKey):
request = BaseRegDeleteKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
return dce.request(request)
def hBaseRegEnumKey(dce, hKey, dwIndex, lpftLastWriteTime = NULL):
request = BaseRegEnumKey()
request['hKey'] = hKey
request['dwIndex'] = dwIndex
request.fields['lpNameIn'].fields['MaximumLength'] = 1024
request.fields['lpNameIn'].fields['Data'].fields['Data'].fields['MaximumCount'] = 1024/2
request['lpClassIn'] = ' '* 64
request['lpftLastWriteTime'] = lpftLastWriteTime
return dce.request(request)
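# A hedged sketch of walking subkeys with hBaseRegEnumKey; assumes `dce` is an
# already-bound MS-RRP connection and `hSubKey` an open key handle:
#
#   i = 0
#   while True:
#       try:
#           resp = hBaseRegEnumKey(dce, hSubKey, i)
#       except DCERPCSessionError:
#           break  # typically ERROR_NO_MORE_ITEMS once the index is exhausted
#       print resp['lpNameOut'][:-1]  # strip the terminating NUL
#       i += 1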
def hBaseRegEnumValue(dce, hKey, dwIndex):
# ToDo, check the result to see whether we need to
# have a bigger buffer for the data to receive
request = BaseRegEnumValue()
request['hKey'] = hKey
request['dwIndex'] = dwIndex
request['lpValueNameIn'] = ' '*128
request['lpData'] = ' '*128
request['lpcbData'] = 128
request['lpcbLen'] = 128
return dce.request(request)
def hBaseRegFlushKey(dce, hKey):
request = BaseRegFlushKey()
request['hKey'] = hKey
return dce.request(request)
def hBaseRegGetKeySecurity(dce, hKey, securityInformation = OWNER_SECURITY_INFORMATION ):
request = BaseRegGetKeySecurity()
request['hKey'] = hKey
request['SecurityInformation'] = securityInformation
request['pRpcSecurityDescriptorIn']['lpSecurityDescriptor'] = NULL
request['pRpcSecurityDescriptorIn']['cbInSecurityDescriptor'] = 1024
return dce.request(request)
def hBaseRegLoadKey(dce, hKey, lpSubKey, lpFile):
request = BaseRegLoadKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['lpFile'] = checkNullString(lpFile)
return dce.request(request)
def hBaseRegUnLoadKey(dce, hKey, lpSubKey):
request = BaseRegUnLoadKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
return dce.request(request)
def hBaseRegOpenKey(dce, hKey, lpSubKey, dwOptions=0x00000001, samDesired = MAXIMUM_ALLOWED):
request = BaseRegOpenKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['dwOptions'] = dwOptions
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegQueryInfoKey(dce, hKey):
request = BaseRegQueryInfoKey()
request['hKey'] = hKey
request['lpClassIn'] = NULL
# Not the cleanest way, but oh well
request.fields['lpClassIn'].fields['MaximumLength'] = 1024
return dce.request(request)
def hBaseRegQueryValue(dce, hKey, lpValueName):
# ToDo, check the result to see whether we need to
# have a bigger buffer for the data to receive
request = BaseRegQueryValue()
request['hKey'] = hKey
request['lpValueName'] = checkNullString(lpValueName)
request['lpData'] = ' '*512
request['lpcbData'] = 512
request['lpcbLen'] = 512
resp = dce.request(request)
# Returns
# ( dataType, data )
return resp['lpType'], unpackValue(resp['lpType'], resp['lpData'])
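# A hedged sketch of reading a single value with the helpers above; assumes
# `dce` is already bound to MSRPC_UUID_RRP, and the key/value names are
# placeholders:
#
#   resp = hOpenLocalMachine(dce)
#   hRootKey = resp['phKey']
#   resp = hBaseRegOpenKey(dce, hRootKey,
#                          'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion')
#   hSubKey = resp['phkResult']
#   valueType, valueData = hBaseRegQueryValue(dce, hSubKey, 'ProductName')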
def hBaseRegReplaceKey(dce, hKey, lpSubKey, lpNewFile, lpOldFile):
request = BaseRegReplaceKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['lpNewFile'] = checkNullString(lpNewFile)
request['lpOldFile'] = checkNullString(lpOldFile)
return dce.request(request)
def hBaseRegRestoreKey(dce, hKey, lpFile, flags=REG_REFRESH_HIVE):
request = BaseRegRestoreKey()
request['hKey'] = hKey
request['lpFile'] = checkNullString(lpFile)
request['Flags'] = flags
return dce.request(request)
def hBaseRegSaveKey(dce, hKey, lpFile, pSecurityAttributes = NULL):
request = BaseRegSaveKey()
request['hKey'] = hKey
request['lpFile'] = checkNullString(lpFile)
request['pSecurityAttributes'] = pSecurityAttributes
return dce.request(request)
def hBaseRegSetValue(dce, hKey, lpValueName, dwType, lpData):
request = BaseRegSetValue()
request['hKey'] = hKey
request['lpValueName'] = checkNullString(lpValueName)
request['dwType'] = dwType
request['lpData'] = lpData.encode('utf-16le')
request['cbData'] = len(request['lpData'])
return dce.request(request)
def hBaseRegGetVersion(dce, hKey):
request = BaseRegGetVersion()
request['hKey'] = hKey
return dce.request(request)
def hOpenCurrentConfig(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenCurrentConfig()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegQueryMultipleValues(dce, hKey, val_listIn):
# ToDo, check the result to see whether we need to
# have a bigger buffer for the data to receive
request = BaseRegQueryMultipleValues()
request['hKey'] = hKey
for item in val_listIn:
itemn = RVALENT()
itemn['ve_valuename'] = checkNullString(item['ValueName'])
itemn['ve_valuelen'] = len(itemn['ve_valuename'])
itemn['ve_valueptr'] = NULL
itemn['ve_type'] = item['ValueType']
request['val_listIn'].append(itemn)
request['num_vals'] = len(request['val_listIn'])
request['lpvalueBuf'] = list(' '*128)
request['ldwTotsize'] = 128
resp = dce.request(request)
retVal = list()
for item in resp['val_listOut']:
itemn = {}
itemn['ValueName'] = item['ve_valuename']
itemn['ValueData'] = unpackValue(item['ve_type'], resp['lpvalueBuf'][item['ve_valueptr'] : item['ve_valueptr']+item['ve_valuelen']])
retVal.append(itemn)
return retVal
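# A hedged sketch of the val_listIn format expected above; the value names and
# types are placeholders:
#
#   val_list = [
#       {'ValueName': 'ProductName', 'ValueType': REG_SZ},
#       {'ValueName': 'SystemRoot', 'ValueType': REG_SZ},
#   ]
#   values = hBaseRegQueryMultipleValues(dce, hSubKey, val_list)
#   # -> [{'ValueName': ..., 'ValueData': ...}, ...]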
def hBaseRegSaveKeyEx(dce, hKey, lpFile, pSecurityAttributes = NULL, flags=1):
request = BaseRegSaveKeyEx()
request['hKey'] = hKey
request['lpFile'] = checkNullString(lpFile)
request['pSecurityAttributes'] = pSecurityAttributes
request['Flags'] = flags
return dce.request(request)
def hOpenPerformanceText(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenPerformanceText()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenPerformanceNlsText(dce, samDesired = MAXIMUM_ALLOWED):
request = OpenPerformanceNlsText()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegDeleteValue(dce, hKey, lpValueName):
request = BaseRegDeleteValue()
request['hKey'] = hKey
request['lpValueName'] = checkNullString(lpValueName)
return dce.request(request)
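# A hedged sketch of obtaining the `dce` binding used by the helpers above,
# via the standard impacket named-pipe transport; TARGET and the credentials
# are placeholders:
#
#   from impacket.dcerpc.v5 import transport
#   rpctransport = transport.DCERPCTransportFactory(r'ncacn_np:TARGET[\pipe\winreg]')
#   rpctransport.set_credentials('user', 'password')
#   dce = rpctransport.get_dce_rpc()
#   dce.connect()
#   dce.bind(MSRPC_UUID_RRP)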
|
|
from __future__ import absolute_import
from __future__ import with_statement
import sys
import types
from contextlib import contextmanager
from mock import Mock, patch
from celery import states
from celery.backends.cache import CacheBackend, DummyClient
from celery.exceptions import ImproperlyConfigured
from celery.registry import tasks
from celery.result import AsyncResult
from celery.task import subtask
from celery.utils import uuid
from celery.utils.encoding import str_to_bytes
from celery.tests.utils import Case, mask_modules, reset_modules
class SomeClass(object):
def __init__(self, data):
self.data = data
class test_CacheBackend(Case):
def setUp(self):
self.tb = CacheBackend(backend="memory://")
self.tid = uuid()
def test_mark_as_done(self):
self.assertEqual(self.tb.get_status(self.tid), states.PENDING)
self.assertIsNone(self.tb.get_result(self.tid))
self.tb.mark_as_done(self.tid, 42)
self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS)
self.assertEqual(self.tb.get_result(self.tid), 42)
def test_is_pickled(self):
result = {"foo": "baz", "bar": SomeClass(12345)}
self.tb.mark_as_done(self.tid, result)
# is serialized properly.
rindb = self.tb.get_result(self.tid)
self.assertEqual(rindb.get("foo"), "baz")
self.assertEqual(rindb.get("bar").data, 12345)
def test_mark_as_failure(self):
try:
raise KeyError("foo")
except KeyError, exception:
self.tb.mark_as_failure(self.tid, exception)
self.assertEqual(self.tb.get_status(self.tid), states.FAILURE)
self.assertIsInstance(self.tb.get_result(self.tid), KeyError)
def test_on_chord_apply(self):
tb = CacheBackend(backend="memory://")
tb.on_chord_apply("setid", [])
@patch("celery.result.TaskSetResult")
def test_on_chord_part_return(self, setresult):
tb = CacheBackend(backend="memory://")
deps = Mock()
deps.total = 2
setresult.restore.return_value = deps
task = Mock()
task.name = "foobarbaz"
try:
tasks["foobarbaz"] = task
task.request.chord = subtask(task)
task.request.taskset = "setid"
tb.on_chord_apply(task.request.taskset, [])
self.assertFalse(deps.join.called)
tb.on_chord_part_return(task)
self.assertFalse(deps.join.called)
tb.on_chord_part_return(task)
deps.join.assert_called_with(propagate=False)
deps.delete.assert_called_with()
finally:
tasks.pop("foobarbaz")
def test_mget(self):
self.tb.set("foo", 1)
self.tb.set("bar", 2)
self.assertDictEqual(self.tb.mget(["foo", "bar"]),
{"foo": 1, "bar": 2})
def test_forget(self):
self.tb.mark_as_done(self.tid, {"foo": "bar"})
x = AsyncResult(self.tid, backend=self.tb)
x.forget()
self.assertIsNone(x.result)
def test_process_cleanup(self):
self.tb.process_cleanup()
def test_expires_as_int(self):
tb = CacheBackend(backend="memory://", expires=10)
self.assertEqual(tb.expires, 10)
def test_unknown_backend_raises_ImproperlyConfigured(self):
with self.assertRaises(ImproperlyConfigured):
CacheBackend(backend="unknown://")
class MyMemcachedStringEncodingError(Exception):
pass
class MemcachedClient(DummyClient):
def set(self, key, value, *args, **kwargs):
if isinstance(key, unicode):
raise MyMemcachedStringEncodingError(
"Keys must be str()'s, not unicode. Convert your unicode "
"strings using mystring.encode(charset)!")
return super(MemcachedClient, self).set(key, value, *args, **kwargs)
class MockCacheMixin(object):
@contextmanager
def mock_memcache(self):
memcache = types.ModuleType("memcache")
memcache.Client = MemcachedClient
memcache.Client.__module__ = memcache.__name__
prev, sys.modules["memcache"] = sys.modules.get("memcache"), memcache
yield True
if prev is not None:
sys.modules["memcache"] = prev
@contextmanager
def mock_pylibmc(self):
pylibmc = types.ModuleType("pylibmc")
pylibmc.Client = MemcachedClient
pylibmc.Client.__module__ = pylibmc.__name__
prev = sys.modules.get("pylibmc")
sys.modules["pylibmc"] = pylibmc
yield True
if prev is not None:
sys.modules["pylibmc"] = prev
class test_get_best_memcache(Case, MockCacheMixin):
def test_pylibmc(self):
with self.mock_pylibmc():
with reset_modules("celery.backends.cache"):
from celery.backends import cache
cache._imp = [None]
self.assertEqual(cache.get_best_memcache().__module__,
"pylibmc")
def test_memcache(self):
with self.mock_memcache():
with reset_modules("celery.backends.cache"):
with mask_modules("pylibmc"):
from celery.backends import cache
cache._imp = [None]
self.assertEqual(cache.get_best_memcache().__module__,
"memcache")
def test_no_implementations(self):
with mask_modules("pylibmc", "memcache"):
with reset_modules("celery.backends.cache"):
from celery.backends import cache
cache._imp = [None]
with self.assertRaises(ImproperlyConfigured):
cache.get_best_memcache()
def test_cached(self):
with self.mock_pylibmc():
with reset_modules("celery.backends.cache"):
from celery.backends import cache
cache._imp = [None]
cache.get_best_memcache(behaviors={"foo": "bar"})
self.assertTrue(cache._imp[0])
cache.get_best_memcache()
def test_backends(self):
from celery.backends.cache import backends
for name, fun in backends.items():
self.assertTrue(fun())
class test_memcache_key(Case, MockCacheMixin):
def test_memcache_unicode_key(self):
with self.mock_memcache():
with reset_modules("celery.backends.cache"):
with mask_modules("pylibmc"):
from celery.backends import cache
cache._imp = [None]
task_id, result = unicode(uuid()), 42
b = cache.CacheBackend(backend='memcache')
b.store_result(task_id, result, status=states.SUCCESS)
self.assertEqual(b.get_result(task_id), result)
def test_memcache_bytes_key(self):
with self.mock_memcache():
with reset_modules("celery.backends.cache"):
with mask_modules("pylibmc"):
from celery.backends import cache
cache._imp = [None]
task_id, result = str_to_bytes(uuid()), 42
b = cache.CacheBackend(backend='memcache')
b.store_result(task_id, result, status=states.SUCCESS)
self.assertEqual(b.get_result(task_id), result)
def test_pylibmc_unicode_key(self):
with reset_modules("celery.backends.cache"):
with self.mock_pylibmc():
from celery.backends import cache
cache._imp = [None]
task_id, result = unicode(uuid()), 42
b = cache.CacheBackend(backend='memcache')
b.store_result(task_id, result, status=states.SUCCESS)
self.assertEqual(b.get_result(task_id), result)
def test_pylibmc_bytes_key(self):
with reset_modules("celery.backends.cache"):
with self.mock_pylibmc():
from celery.backends import cache
cache._imp = [None]
task_id, result = str_to_bytes(uuid()), 42
b = cache.CacheBackend(backend='memcache')
b.store_result(task_id, result, status=states.SUCCESS)
self.assertEqual(b.get_result(task_id), result)
|
|
#!/usr/bin/python
from bcc import BPF
from ctypes import *
import argparse
import os
from time import sleep,time,localtime,asctime
import types
# pre defines -------------------------------
ROOT_PATH = "/sys/class/net"
IFNAMSIZ = 16
COL_WIDTH = 10
MAX_QUEUE_NUM = 1024
EBPF_FILE = "netqtop.c"
# structure for network interface name array
class Devname(Structure):
_fields_=[
('name', c_char*IFNAMSIZ)
]
################## printer for results ###################
def to_str(num):
s = ""
if num > 1000000:
return str(round(num/(1024*1024.0), 2)) + 'M'
elif num > 1000:
return str(round(num/1024.0, 2)) + 'K'
else:
if type(num) == types.FloatType:
return str(round(num, 2))
else:
return str(num)
def print_table(table, qnum):
global print_interval
# ---- print headers ----------------
headers = [
"QueueID",
"avg_size",
"[0, 64)",
"[64, 512)",
"[512, 2K)",
"[2K, 16K)",
"[16K, 64K)"
]
if args.throughput:
headers.append("BPS")
headers.append("PPS")
for hd in headers:
print(hd.center(COL_WIDTH)),
print
# ------- calculates --------------
qids=[]
tBPS = 0
tPPS = 0
tAVG = 0
tGroup = [0,0,0,0,0]
tpkt = 0
tlen = 0
for k, v in table.items():
qids += [k.value]
tlen += v.total_pkt_len
tpkt += v.num_pkt
tGroup[0] += v.size_64B
tGroup[1] += v.size_512B
tGroup[2] += v.size_2K
tGroup[3] += v.size_16K
tGroup[4] += v.size_64K
tBPS = tlen / print_interval
tPPS = tpkt / print_interval
if tpkt != 0:
tAVG = tlen / tpkt
# -------- print table --------------
for k in range(qnum):
if k in qids:
item = table[c_ushort(k)]
data = [
k,
item.total_pkt_len,
item.num_pkt,
item.size_64B,
item.size_512B,
item.size_2K,
item.size_16K,
item.size_64K
]
else:
data = [k,0,0,0,0,0,0,0]
# print a line per queue
avg = 0
if data[2] != 0:
avg = data[1] / data[2]
print("%5d %11s %10s %10s %10s %10s %10s" % (
data[0],
to_str(avg),
to_str(data[3]),
to_str(data[4]),
to_str(data[5]),
to_str(data[6]),
to_str(data[7])
)),
if args.throughput:
BPS = data[1] / print_interval
PPS = data[2] / print_interval
print("%10s %10s" % (
to_str(BPS),
to_str(PPS)
))
else:
print
# ------- print total --------------
print(" Total %10s %10s %10s %10s %10s %10s" % (
to_str(tAVG),
to_str(tGroup[0]),
to_str(tGroup[1]),
to_str(tGroup[2]),
to_str(tGroup[3]),
to_str(tGroup[4])
)),
if args.throughput:
print("%10s %10s" % (
to_str(tBPS),
to_str(tPPS)
))
else:
print
def print_result(b):
# --------- print tx queues ---------------
print(asctime(localtime(time())))
print("TX")
table = b['tx_q']
print_table(table, tx_num)
b['tx_q'].clear()
# --------- print rx queues ---------------
print("")
print("RX")
table = b['rx_q']
print_table(table, rx_num)
b['rx_q'].clear()
if args.throughput:
print("-"*95)
else:
print("-"*76)
############## specify network interface #################
parser = argparse.ArgumentParser(description="")
parser.add_argument("--name", "-n", type=str, default="")
parser.add_argument("--interval", "-i", type=float, default=1)
parser.add_argument("--throughput", "-t", action="store_true")
parser.add_argument("--ebpf", action="store_true", help=argparse.SUPPRESS)
args = parser.parse_args()
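# Example invocations (flags as defined above; the interface name is illustrative):
#   ./netqtop.py -n eth0           # per-queue packet-size histogram, 1s interval
#   ./netqtop.py -n eth0 -i 2 -t   # 2s interval, additionally print BPS/PPS throughput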
if args.ebpf:
with open(EBPF_FILE) as fileobj:
progtxt = fileobj.read()
print(progtxt)
exit()
if args.name == "":
print ("Please specify a network interface.")
exit()
else:
dev_name = args.name
if len(dev_name) > IFNAMSIZ-1:
print ("NIC name too long")
exit()
print_interval = args.interval + 0.0
if print_interval == 0:
print "print interval must be non-zero"
exit()
################ get number of queues #####################
tx_num = 0
rx_num = 0
path = ROOT_PATH + "/" + dev_name + "/queues"
if not os.path.exists(path):
print "Net interface", dev_name, "does not exits."
exit()
list = os.listdir(path)
for s in list:
if s[0] == 'r':
rx_num += 1
if s[0] == 't':
tx_num += 1
if tx_num > MAX_QUEUE_NUM or rx_num > MAX_QUEUE_NUM:
print "number of queues over 1024 is not supported."
exit()
################## start tracing ##################
b = BPF(src_file = EBPF_FILE)
# --------- set hash array --------
devname_map = b['name_map']
_name = Devname()
_name.name = dev_name
devname_map[0] = _name
while 1:
try:
sleep(print_interval)
print_result(b)
except KeyboardInterrupt:
exit()
|
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script will migrate the database of an openvswitch or linuxbridge
plugin so that it can be used with the ml2 plugin.
Known Limitations:
- THIS SCRIPT IS DESTRUCTIVE! Make sure to backup your
Neutron database before running this script, in case anything goes
wrong.
- It will be necessary to upgrade the database to the target release
via neutron-db-manage before attempting to migrate to ml2.
Initially, only the icehouse release is supported.
- This script does not automate configuration migration.
Example usage:
python -m neutron.db.migration.migrate_to_ml2 openvswitch \
mysql://login:pass@127.0.0.1/neutron
Note that migration of tunneling state will only be attempted if the
--tunnel-type parameter is provided.
To manually test migration from ovs to ml2 with devstack:
- stack with Q_PLUGIN=openvswitch
- boot an instance and validate connectivity
- stop the neutron service and all agents
- run the neutron-migrate-to-ml2 script
- update /etc/neutron/neutron.conf as follows:
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
- Create /etc/neutron/plugins/ml2/ml2_conf.ini and ensure that:
- ml2.mechanism_drivers includes 'openvswitch'
- ovs.local_ip is set correctly
- database.connection is set correctly
- Start the neutron service with the ml2 config file created in
the previous step in place of the openvswitch config file
- Start all the agents
- verify that the booted instance still has connectivity
- boot a second instance and validate connectivity
"""
import argparse
from oslo.db.sqlalchemy import session
import sqlalchemy as sa
from neutron.extensions import portbindings
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers import type_vxlan
# Migration targets
LINUXBRIDGE = 'linuxbridge'
OPENVSWITCH = 'openvswitch'
# Releases
ICEHOUSE = 'icehouse'
JUNO = 'juno'
SUPPORTED_SCHEMA_VERSIONS = [ICEHOUSE, JUNO]
def check_db_schema_version(engine, metadata):
"""Check that current version of the db schema is supported."""
version_table = sa.Table(
'alembic_version', metadata, autoload=True, autoload_with=engine)
versions = [v[0] for v in engine.execute(version_table.select())]
if not versions:
raise ValueError(_("Missing version in alembic_versions table"))
elif len(versions) > 1:
raise ValueError(_("Multiple versions in alembic_versions table: %s")
% versions)
current_version = versions[0]
if current_version not in SUPPORTED_SCHEMA_VERSIONS:
raise SystemError(_("Unsupported database schema %(current)s. "
"Please migrate your database to one of following "
"versions: %(supported)s")
% {'current': current_version,
'supported': ', '.join(SUPPORTED_SCHEMA_VERSIONS)}
)
# Duplicated from neutron.plugins.linuxbridge.common.constants to
# avoid having any dependency on the linuxbridge plugin being
# installed.
def interpret_vlan_id(vlan_id):
"""Return (network_type, segmentation_id) tuple for encoded vlan_id."""
FLAT_VLAN_ID = -1
LOCAL_VLAN_ID = -2
if vlan_id == LOCAL_VLAN_ID:
return (p_const.TYPE_LOCAL, None)
elif vlan_id == FLAT_VLAN_ID:
return (p_const.TYPE_FLAT, None)
else:
return (p_const.TYPE_VLAN, vlan_id)
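# A brief illustration of the encoding handled above (assuming the usual
# neutron type constants 'local', 'flat' and 'vlan'):
#   interpret_vlan_id(-2)   -> ('local', None)
#   interpret_vlan_id(-1)   -> ('flat', None)
#   interpret_vlan_id(100)  -> ('vlan', 100)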
class BaseMigrateToMl2(object):
def __init__(self, vif_type, driver_type, segment_table_name,
vlan_allocation_table_name, old_tables):
self.vif_type = vif_type
self.driver_type = driver_type
self.segment_table_name = segment_table_name
self.vlan_allocation_table_name = vlan_allocation_table_name
self.old_tables = old_tables
def __call__(self, connection_url, save_tables=False, tunnel_type=None,
vxlan_udp_port=None):
engine = session.create_engine(connection_url)
metadata = sa.MetaData()
check_db_schema_version(engine, metadata)
self.define_ml2_tables(metadata)
# Autoload the ports table to ensure that foreign keys to it and
# the network table can be created for the new tables.
sa.Table('ports', metadata, autoload=True, autoload_with=engine)
metadata.create_all(engine)
self.migrate_network_segments(engine, metadata)
if tunnel_type:
self.migrate_tunnels(engine, tunnel_type, vxlan_udp_port)
self.migrate_vlan_allocations(engine)
self.migrate_port_bindings(engine, metadata)
self.drop_old_tables(engine, save_tables)
def migrate_segment_dict(self, binding):
binding['id'] = uuidutils.generate_uuid()
def migrate_network_segments(self, engine, metadata):
# Migrating network segments requires loading the data to python
# so that a uuid can be generated for each segment.
source_table = sa.Table(self.segment_table_name, metadata,
autoload=True, autoload_with=engine)
source_segments = engine.execute(source_table.select())
ml2_segments = [dict(x) for x in source_segments]
for segment in ml2_segments:
self.migrate_segment_dict(segment)
if ml2_segments:
ml2_network_segments = metadata.tables['ml2_network_segments']
engine.execute(ml2_network_segments.insert(), ml2_segments)
def migrate_tunnels(self, engine, tunnel_type, vxlan_udp_port=None):
"""Override this method to perform plugin-specific tunnel migration."""
pass
def migrate_vlan_allocations(self, engine):
engine.execute(("""
INSERT INTO ml2_vlan_allocations
SELECT physical_network, vlan_id, allocated
FROM %(source_table)s
WHERE allocated = TRUE
""") % {'source_table': self.vlan_allocation_table_name})
def get_port_segment_map(self, engine):
"""Retrieve a mapping of port id to segment id.
The monolithic plugins only support a single segment per
network, so the segment id can be uniquely identified by
the network associated with a given port.
"""
port_segments = engine.execute("""
SELECT ports_network.port_id, ml2_network_segments.id AS segment_id
FROM ml2_network_segments, (
SELECT portbindingports.port_id, ports.network_id
FROM portbindingports, ports
WHERE portbindingports.port_id = ports.id
) AS ports_network
WHERE ml2_network_segments.network_id = ports_network.network_id
""")
return dict(x for x in port_segments)
def migrate_port_bindings(self, engine, metadata):
port_segment_map = self.get_port_segment_map(engine)
port_binding_ports = sa.Table('portbindingports', metadata,
autoload=True, autoload_with=engine)
source_bindings = engine.execute(port_binding_ports.select())
ml2_bindings = [dict(x) for x in source_bindings]
for binding in ml2_bindings:
binding['vif_type'] = self.vif_type
binding['driver'] = self.driver_type
segment = port_segment_map.get(binding['port_id'])
if segment:
binding['segment'] = segment
if ml2_bindings:
ml2_port_bindings = metadata.tables['ml2_port_bindings']
engine.execute(ml2_port_bindings.insert(), ml2_bindings)
class BaseMigrateToMl2_IcehouseMixin(object):
"""A mixin to ensure ml2 database schema state for Icehouse.
This class creates the tables that are missing from Icehouse schema revisions. In Juno,
the schema state has been healed, so we do not need to run these.
"""
def drop_old_tables(self, engine, save_tables=False):
if save_tables:
return
old_tables = self.old_tables + [self.vlan_allocation_table_name,
self.segment_table_name]
for table_name in old_tables:
engine.execute('DROP TABLE %s' % table_name)
def define_ml2_tables(self, metadata):
sa.Table(
'arista_provisioned_nets', metadata,
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('segmentation_id', sa.Integer(),
autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
)
sa.Table(
'arista_provisioned_vms', metadata,
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('vm_id', sa.String(length=255), nullable=True),
sa.Column('host_id', sa.String(length=255), nullable=True),
sa.Column('port_id', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
sa.Table(
'arista_provisioned_tenants', metadata,
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
sa.Table(
'cisco_ml2_nexusport_bindings', metadata,
sa.Column('binding_id', sa.Integer(), nullable=False),
sa.Column('port_id', sa.String(length=255), nullable=True),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('switch_ip', sa.String(length=255), nullable=True),
sa.Column('instance_id', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('binding_id'),
)
sa.Table(
'cisco_ml2_credentials', metadata,
sa.Column('credential_id', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('credential_name', sa.String(length=255),
nullable=False),
sa.Column('user_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('tenant_id', 'credential_name'),
)
sa.Table(
'ml2_flat_allocations', metadata,
sa.Column('physical_network', sa.String(length=64),
nullable=False),
sa.PrimaryKeyConstraint('physical_network'),
)
sa.Table(
'ml2_gre_allocations', metadata,
sa.Column('gre_id', sa.Integer, nullable=False,
autoincrement=False),
sa.Column('allocated', sa.Boolean, nullable=False),
sa.PrimaryKeyConstraint('gre_id'),
)
sa.Table(
'ml2_gre_endpoints', metadata,
sa.Column('ip_address', sa.String(length=64)),
sa.PrimaryKeyConstraint('ip_address'),
)
sa.Table(
'ml2_network_segments', metadata,
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=32), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
)
sa.Table(
'ml2_port_bindings', metadata,
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('vif_type', sa.String(length=64), nullable=False),
sa.Column('driver', sa.String(length=64), nullable=True),
sa.Column('segment', sa.String(length=36), nullable=True),
sa.Column('vnic_type', sa.String(length=64), nullable=False,
server_default='normal'),
sa.Column('vif_details', sa.String(4095), nullable=False,
server_default=''),
sa.Column('profile', sa.String(4095), nullable=False,
server_default=''),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('port_id'),
)
sa.Table(
'ml2_vlan_allocations', metadata,
sa.Column('physical_network', sa.String(length=64),
nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), autoincrement=False,
nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id'),
)
sa.Table(
'ml2_vxlan_allocations', metadata,
sa.Column('vxlan_vni', sa.Integer, nullable=False,
autoincrement=False),
sa.Column('allocated', sa.Boolean, nullable=False),
sa.PrimaryKeyConstraint('vxlan_vni'),
)
sa.Table(
'ml2_vxlan_endpoints', metadata,
sa.Column('ip_address', sa.String(length=64)),
sa.Column('udp_port', sa.Integer(), nullable=False,
autoincrement=False),
sa.PrimaryKeyConstraint('ip_address', 'udp_port'),
)
class MigrateLinuxBridgeToMl2_Juno(BaseMigrateToMl2):
def __init__(self):
super(MigrateLinuxBridgeToMl2_Juno, self).__init__(
vif_type=portbindings.VIF_TYPE_BRIDGE,
driver_type=LINUXBRIDGE,
segment_table_name='network_bindings',
vlan_allocation_table_name='network_states',
old_tables=['portbindingports'])
def migrate_segment_dict(self, binding):
super(MigrateLinuxBridgeToMl2_Juno, self).migrate_segment_dict(
binding)
vlan_id = binding.pop('vlan_id')
network_type, segmentation_id = interpret_vlan_id(vlan_id)
binding['network_type'] = network_type
binding['segmentation_id'] = segmentation_id
class MigrateOpenvswitchToMl2_Juno(BaseMigrateToMl2):
def __init__(self):
super(MigrateOpenvswitchToMl2_Juno, self).__init__(
vif_type=portbindings.VIF_TYPE_OVS,
driver_type=OPENVSWITCH,
segment_table_name='ovs_network_bindings',
vlan_allocation_table_name='ovs_vlan_allocations',
old_tables=[
'ovs_tunnel_allocations',
'ovs_tunnel_endpoints',
'portbindingports',
])
def migrate_tunnels(self, engine, tunnel_type, vxlan_udp_port=None):
if tunnel_type == p_const.TYPE_GRE:
engine.execute("""
INSERT INTO ml2_gre_allocations
SELECT tunnel_id as gre_id, allocated
FROM ovs_tunnel_allocations
WHERE allocated = TRUE
""")
engine.execute("""
INSERT INTO ml2_gre_endpoints
SELECT ip_address
FROM ovs_tunnel_endpoints
""")
elif tunnel_type == p_const.TYPE_VXLAN:
if not vxlan_udp_port:
vxlan_udp_port = type_vxlan.VXLAN_UDP_PORT
engine.execute("""
INSERT INTO ml2_vxlan_allocations
SELECT tunnel_id as vxlan_vni, allocated
FROM ovs_tunnel_allocations
WHERE allocated = TRUE
""")
engine.execute(sa.text("""
INSERT INTO ml2_vxlan_endpoints
SELECT ip_address, :udp_port as udp_port
FROM ovs_tunnel_endpoints
"""), udp_port=vxlan_udp_port)
else:
raise ValueError(_('Unknown tunnel type: %s') % tunnel_type)
class MigrateLinuxBridgeToMl2_Icehouse(MigrateLinuxBridgeToMl2_Juno,
BaseMigrateToMl2_IcehouseMixin):
pass
class MigrateOpenvswitchToMl2_Icehouse(MigrateOpenvswitchToMl2_Juno,
BaseMigrateToMl2_IcehouseMixin):
pass
migrate_map = {
ICEHOUSE: {
OPENVSWITCH: MigrateOpenvswitchToMl2_Icehouse,
LINUXBRIDGE: MigrateLinuxBridgeToMl2_Icehouse,
},
JUNO: {
OPENVSWITCH: MigrateOpenvswitchToMl2_Juno,
LINUXBRIDGE: MigrateLinuxBridgeToMl2_Juno,
},
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('plugin', choices=[OPENVSWITCH, LINUXBRIDGE],
help=_('The plugin type whose database will be '
'migrated'))
parser.add_argument('connection',
help=_('The connection url for the target db'))
parser.add_argument('--tunnel-type', choices=[p_const.TYPE_GRE,
p_const.TYPE_VXLAN],
help=_('The %s tunnel type to migrate from') %
OPENVSWITCH)
parser.add_argument('--vxlan-udp-port', default=None, type=int,
help=_('The UDP port to use for VXLAN tunnels.'))
parser.add_argument('--release', default=JUNO, choices=[ICEHOUSE, JUNO])
parser.add_argument('--save-tables', default=False, action='store_true',
help=_("Retain the old plugin's tables"))
#TODO(marun) Provide a verbose option
args = parser.parse_args()
if args.plugin == LINUXBRIDGE and (args.tunnel_type or
args.vxlan_udp_port):
msg = _('Tunnel args (tunnel-type and vxlan-udp-port) are not valid '
'for the %s plugin')
parser.error(msg % LINUXBRIDGE)
try:
migrate_func = migrate_map[args.release][args.plugin]()
except KeyError:
msg = _('Support for migrating %(plugin)s for release '
'%(release)s is not yet implemented')
parser.error(msg % {'plugin': args.plugin, 'release': args.release})
else:
migrate_func(args.connection, args.save_tables, args.tunnel_type,
args.vxlan_udp_port)
if __name__ == '__main__':
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import contextlib
import eventlet
from oslo.config import cfg
from keystone.openstack.common.gettextutils import _ # noqa
from keystone.openstack.common import log as logging
matchmaker_opts = [
cfg.IntOpt('matchmaker_heartbeat_freq',
default=300,
help='Heartbeat frequency'),
cfg.IntOpt('matchmaker_heartbeat_ttl',
default=600,
help='Heartbeat time-to-live.'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager
class MatchMakerException(Exception):
"""Signified a match could not be found."""
message = _("Match not found by MatchMaker.")
class Exchange(object):
"""Implements lookups.
Subclass this to support hashtables, dns, etc.
"""
def __init__(self):
pass
def run(self, key):
raise NotImplementedError()
class Binding(object):
"""A binding on which to perform a lookup."""
def __init__(self):
pass
def test(self, key):
raise NotImplementedError()
class MatchMakerBase(object):
"""Match Maker Base Class.
Build off HeartbeatMatchMakerBase if building a heartbeat-capable
MatchMaker.
"""
def __init__(self):
# Array of tuples. Index [2] toggles negation, [3] is last-if-true
self.bindings = []
self.no_heartbeat_msg = _('Matchmaker does not implement '
'registration or heartbeat.')
def register(self, key, host):
"""Register a host on a backend.
Heartbeats, if applicable, may keepalive registration.
"""
pass
def ack_alive(self, key, host):
"""Acknowledge that a key.host is alive.
Used internally for updating heartbeats, but may also be used
publicly to acknowledge a system is alive (i.e. rpc message
successfully sent to host)
"""
pass
def is_alive(self, topic, host):
"""Checks if a host is alive."""
pass
def expire(self, topic, host):
"""Explicitly expire a host's registration."""
pass
def send_heartbeats(self):
"""Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method.
"""
pass
def unregister(self, key, host):
"""Unregister a topic."""
pass
def start_heartbeat(self):
"""Spawn heartbeat greenthread."""
pass
def stop_heartbeat(self):
"""Destroys the heartbeat greenthread."""
pass
def add_binding(self, binding, rule, last=True):
self.bindings.append((binding, rule, False, last))
#NOTE(ewindisch): kept the following method in case we implement the
# underlying support.
#def add_negate_binding(self, binding, rule, last=True):
# self.bindings.append((binding, rule, True, last))
def queues(self, key):
workers = []
# bit is for negate bindings - if we choose to implement it.
# last stops processing rules if this matches.
for (binding, exchange, bit, last) in self.bindings:
if binding.test(key):
workers.extend(exchange.run(key))
# Support last.
if last:
return workers
return workers
class HeartbeatMatchMakerBase(MatchMakerBase):
"""Base for a heart-beat capable MatchMaker.
Provides common methods for registering, unregistering, and maintaining
heartbeats.
"""
def __init__(self):
self.hosts = set()
self._heart = None
self.host_topic = {}
super(HeartbeatMatchMakerBase, self).__init__()
def send_heartbeats(self):
"""Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method.
"""
for key, host in self.host_topic:
self.ack_alive(key, host)
def ack_alive(self, key, host):
"""Acknowledge that a host.topic is alive.
Used internally for updating heartbeats, but may also be used
publicly to acknowledge a system is alive (i.e. rpc message
successfully sent to host)
"""
raise NotImplementedError("Must implement ack_alive")
def backend_register(self, key, host):
"""Implements registration logic.
Called by register(self,key,host)
"""
raise NotImplementedError("Must implement backend_register")
def backend_unregister(self, key, key_host):
"""Implements de-registration logic.
Called by unregister(self,key,host)
"""
raise NotImplementedError("Must implement backend_unregister")
def register(self, key, host):
"""Register a host on a backend.
Heartbeats, if applicable, may keepalive registration.
"""
self.hosts.add(host)
self.host_topic[(key, host)] = host
key_host = '.'.join((key, host))
self.backend_register(key, key_host)
self.ack_alive(key, host)
def unregister(self, key, host):
"""Unregister a topic."""
if (key, host) in self.host_topic:
del self.host_topic[(key, host)]
self.hosts.discard(host)
self.backend_unregister(key, '.'.join((key, host)))
LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
{'key': key, 'host': host})
def start_heartbeat(self):
"""Implementation of MatchMakerBase.start_heartbeat.
Launches greenthread looping send_heartbeats(),
yielding for CONF.matchmaker_heartbeat_freq seconds
between iterations.
"""
if not self.hosts:
raise MatchMakerException(
_("Register before starting heartbeat."))
def do_heartbeat():
while True:
self.send_heartbeats()
eventlet.sleep(CONF.matchmaker_heartbeat_freq)
self._heart = eventlet.spawn(do_heartbeat)
def stop_heartbeat(self):
"""Destroys the heartbeat greenthread."""
if self._heart:
self._heart.kill()
class DirectBinding(Binding):
"""Specifies a host in the key via a '.' character.
Although dots are used in the key, the behavior here is
that it maps directly to a host, thus direct.
"""
def test(self, key):
return '.' in key
class TopicBinding(Binding):
"""Where a 'bare' key without dots.
AMQP generally considers topic exchanges to be those *with* dots,
but we deviate here in terminology as the behavior here matches
that of a topic exchange (whereas where there are dots, behavior
matches that of a direct exchange.
"""
def test(self, key):
return '.' not in key
class FanoutBinding(Binding):
"""Match on fanout keys, where key starts with 'fanout.' string."""
def test(self, key):
return key.startswith('fanout~')
class StubExchange(Exchange):
"""Exchange that does nothing."""
def run(self, key):
return [(key, None)]
class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local."""
def __init__(self, host='localhost'):
self.host = host
super(LocalhostExchange, self).__init__()
def run(self, key):
return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
"""Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute.host" running on "host"
"""
def __init__(self):
super(DirectExchange, self).__init__()
def run(self, key):
e = key.split('.', 1)[1]
return [(key, e)]
class MatchMakerLocalhost(MatchMakerBase):
"""Match Maker where all bare topics resolve to localhost.
Useful for testing.
"""
def __init__(self, host='localhost'):
super(MatchMakerLocalhost, self).__init__()
self.add_binding(FanoutBinding(), LocalhostExchange(host))
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), LocalhostExchange(host))
class MatchMakerStub(MatchMakerBase):
"""Match Maker where topics are untouched.
Useful for testing, or for AMQP/brokered queues.
Will not work where knowledge of hosts is known (i.e. zeromq)
"""
def __init__(self):
super(MatchMakerStub, self).__init__()
self.add_binding(FanoutBinding(), StubExchange())
self.add_binding(DirectBinding(), StubExchange())
self.add_binding(TopicBinding(), StubExchange())
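# A minimal sketch of how the bindings above resolve keys (return values assume
# the LocalhostExchange/DirectExchange implementations in this module):
#
#   mm = MatchMakerLocalhost()
#   mm.queues('scheduler')          # -> [('scheduler.localhost', 'localhost')]
#   mm.queues('scheduler.node-1')   # -> [('scheduler.node-1', 'node-1')]
#   mm.queues('fanout~scheduler')   # -> [('fanout~scheduler.localhost', 'localhost')]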
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import uuid
import logging
from .. import helpers
logger = logging.getLogger(__name__)
# Module API
def write_trial_and_record(conn, trial_attrs, record_id, source_url):
"""Write trial and its record to database.
Args:
conn (dict): connection dict
trial_attrs (dict): normalized trial data
record_id (str): identifier of the record to be upserted
source_url (str): record's source_url
Raises:
KeyError: if data structure is not valid
Returns:
(str, bool) or (None, None): trial id and is-primary flag, or (None, None) if the trial was not written (skipped)
"""
create = False
trial, deduplication_method = helpers.find_trial(conn, trial_attrs, ignore_record_id=record_id)
# Create object
if not trial:
trial = {
'id': uuid.uuid1().hex,
}
create = True
deduplication_method = 'initial'
# Decide primary
is_primary = False
priority = ['nct', 'euctr', 'isrctn']
if create or trial.get('source_id') == trial_attrs['source_id']:
is_primary = True
else:
for register in priority:
if trial_attrs['source_id'] == register:
is_primary = True
break
elif trial.get('source_id') == register:
is_primary = False
break
# BUG #389: Overwrite trials without records from the same source
if trial.get('source_id'):
records_count = conn['database']['records'].count(
trial_id=trial['id'],
source_id=trial.get('source_id')
)
if records_count == 0:
is_primary = True
# Update attributes
trial.update(_get_all_trial_attrs(trial_attrs))
# Write trial
try:
conn['database'].begin()
if is_primary:
conn['database']['trials'].upsert(trial, ['id'], ensure=False)
record_id = _write_record(conn, trial, record_id, trial['source_id'], source_url, is_primary)
_write_deduplication_log(conn, trial['id'], record_id, deduplication_method)
except Exception:
conn['database'].rollback()
raise
else:
conn['database'].commit()
# Log debug
logger.debug('Trial - %s: %s',
'created' if create else 'updated', trial_attrs['identifiers'])
return trial['id'], is_primary
def _write_record(conn, trial, record_id, source_id, source_url, is_primary):
"""Write record to database.
Args:
conn (dict): connection dict
trial (dict): related trial data
record_id (dict): UUID of the record
source_id (str): related source id
source_url (str): Record source's URL
is_primary (bool): is the record primary
Raises:
KeyError: if data structure is not valid
Returns:
str or None: record identifier, or None if the record was not written (skipped)
"""
create = False
# Read record
record = conn['database']['records'].find_one(id=record_id)
# Create
if not record:
record = {
'id': record_id,
}
create = True
# Update record
record.update(_get_all_trial_attrs(trial))
record.update({
'trial_id': trial['id'],
'source_id': source_id,
'source_url': source_url,
'is_primary': is_primary,
# ---
'last_verification_date': trial.get('last_verification_date'),
})
if helpers.validate_remote_url(record['source_url']):
try:
conn['database'].begin()
if record['is_primary']:
conn['database']['records'].update(
{
'trial_id': record['trial_id'],
'is_primary': False,
},
['trial_id']
)
conn['database']['records'].upsert(record, ['id'], ensure=False)
except Exception:
conn['database'].rollback()
raise
else:
conn['database'].commit()
logger.debug('Record - %s: %s',
'created' if create else 'updated', trial['identifiers'])
return record['id']
else:
msg = "Record couldn't be written because source_url '%s' is invalid" % record['source_url']
raise ValueError(msg)
def _write_deduplication_log(conn, trial_id, record_id, method):
latest_log = conn['database']['trial_deduplication_logs'].find_one(
trial_id=trial_id,
record_id=record_id,
order_by='-created_at'
)
logger.debug(
'Trial "%s" was matched with record "%s" (method: %s)',
trial_id,
record_id,
method
)
if not latest_log or latest_log['method'] != method:
data = {
'trial_id': trial_id,
'record_id': record_id,
'method': method,
'commit': os.environ.get('SOURCE_COMMIT')
}
conn['database']['trial_deduplication_logs'].insert(data)
def _get_all_trial_attrs(trial_attrs):
'''Returns dict with all trial attributes set from the received `trial_attrs`
The keys of the returned dict should contain all attributes from the Trial
table, except `id`, `created_at`, `updated_at`.
'''
return {
'identifiers': trial_attrs['identifiers'],
'public_title': trial_attrs['public_title'],
# ---
'source_id': trial_attrs.get('source_id'),
'registration_date': trial_attrs.get('registration_date'),
'completion_date': trial_attrs.get('completion_date'),
'brief_summary': trial_attrs.get('brief_summary'),
'scientific_title': trial_attrs.get('scientific_title'),
'description': trial_attrs.get('description'),
'status': trial_attrs.get('status'),
'recruitment_status': trial_attrs.get('recruitment_status'),
'eligibility_criteria': trial_attrs.get('eligibility_criteria'),
'target_sample_size': trial_attrs.get('target_sample_size'),
'first_enrollment_date': trial_attrs.get('first_enrollment_date'),
'study_type': trial_attrs.get('study_type'),
'study_design': trial_attrs.get('study_design'),
'study_phase': trial_attrs.get('study_phase'),
'primary_outcomes': trial_attrs.get('primary_outcomes'),
'secondary_outcomes': trial_attrs.get('secondary_outcomes'),
'gender': trial_attrs.get('gender'),
'age_range': trial_attrs.get('age_range'),
'has_published_results': trial_attrs.get('has_published_results'),
'results_exemption_date': trial_attrs.get('results_exemption_date'),
}
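# A minimal usage sketch (the connection object and attribute values are
# illustrative; `conn` is expected to expose a 'database' mapping of tables as
# used above):
#
#   trial_attrs = {
#       'identifiers': {'nct': 'NCT00000000'},
#       'public_title': 'Example trial',
#       'source_id': 'nct',
#   }
#   trial_id, is_primary = write_trial_and_record(
#       conn, trial_attrs, record_id=uuid.uuid1().hex,
#       source_url='https://clinicaltrials.gov/ct2/show/NCT00000000')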
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.notebooks_v1.services.managed_notebook_service import pagers
from google.cloud.notebooks_v1.types import managed_service
from google.cloud.notebooks_v1.types import runtime
from google.cloud.notebooks_v1.types import runtime as gcn_runtime
from google.cloud.notebooks_v1.types import service
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import ManagedNotebookServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ManagedNotebookServiceGrpcAsyncIOTransport
from .client import ManagedNotebookServiceClient
class ManagedNotebookServiceAsyncClient:
"""API v1 service for Managed Notebooks."""
_client: ManagedNotebookServiceClient
DEFAULT_ENDPOINT = ManagedNotebookServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ManagedNotebookServiceClient.DEFAULT_MTLS_ENDPOINT
runtime_path = staticmethod(ManagedNotebookServiceClient.runtime_path)
parse_runtime_path = staticmethod(ManagedNotebookServiceClient.parse_runtime_path)
common_billing_account_path = staticmethod(
ManagedNotebookServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
ManagedNotebookServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(ManagedNotebookServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
ManagedNotebookServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
ManagedNotebookServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
ManagedNotebookServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(ManagedNotebookServiceClient.common_project_path)
parse_common_project_path = staticmethod(
ManagedNotebookServiceClient.parse_common_project_path
)
common_location_path = staticmethod(
ManagedNotebookServiceClient.common_location_path
)
parse_common_location_path = staticmethod(
ManagedNotebookServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ManagedNotebookServiceAsyncClient: The constructed client.
"""
return ManagedNotebookServiceClient.from_service_account_info.__func__(ManagedNotebookServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ManagedNotebookServiceAsyncClient: The constructed client.
"""
return ManagedNotebookServiceClient.from_service_account_file.__func__(ManagedNotebookServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return ManagedNotebookServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> ManagedNotebookServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ManagedNotebookServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(ManagedNotebookServiceClient).get_transport_class,
type(ManagedNotebookServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ManagedNotebookServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the managed notebook service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ManagedNotebookServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ManagedNotebookServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_runtimes(
self,
request: Union[managed_service.ListRuntimesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRuntimesAsyncPager:
r"""Lists Runtimes in a given project and location.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_list_runtimes():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.ListRuntimesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_runtimes(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.ListRuntimesRequest, dict]):
The request object. Request for listing Managed Notebook
Runtimes.
parent (:class:`str`):
Required. Format:
``parent=projects/{project_id}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.notebooks_v1.services.managed_notebook_service.pagers.ListRuntimesAsyncPager:
Response for listing Managed Notebook
Runtimes.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.ListRuntimesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_runtimes,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListRuntimesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_runtime(
self,
request: Union[managed_service.GetRuntimeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> runtime.Runtime:
r"""Gets details of a single Runtime. The location must
be a regional endpoint rather than zonal.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_get_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.GetRuntimeRequest(
name="name_value",
)
# Make the request
response = client.get_runtime(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.GetRuntimeRequest, dict]):
The request object. Request for getting a Managed
Notebook Runtime.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.notebooks_v1.types.Runtime:
The definition of a Runtime for a
managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.GetRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_runtime,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_runtime(
self,
request: Union[managed_service.CreateRuntimeRequest, dict] = None,
*,
parent: str = None,
runtime_id: str = None,
runtime: gcn_runtime.Runtime = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Runtime in a given project and
location.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_create_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.CreateRuntimeRequest(
parent="parent_value",
runtime_id="runtime_id_value",
)
# Make the request
operation = client.create_runtime(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.CreateRuntimeRequest, dict]):
The request object. Request for creating a Managed
Notebook Runtime.
parent (:class:`str`):
Required. Format:
``parent=projects/{project_id}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
runtime_id (:class:`str`):
Required. User-defined unique ID of
this Runtime.
This corresponds to the ``runtime_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
runtime (:class:`google.cloud.notebooks_v1.types.Runtime`):
Required. The Runtime to be created.
This corresponds to the ``runtime`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1.types.Runtime` The
definition of a Runtime for a managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, runtime_id, runtime])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.CreateRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if runtime_id is not None:
request.runtime_id = runtime_id
if runtime is not None:
request.runtime = runtime
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_runtime,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gcn_runtime.Runtime,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def delete_runtime(
self,
request: Union[managed_service.DeleteRuntimeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single Runtime.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_delete_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.DeleteRuntimeRequest(
name="name_value",
)
# Make the request
operation = client.delete_runtime(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.DeleteRuntimeRequest, dict]):
The request object. Request for deleting a Managed
Notebook Runtime.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.DeleteRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_runtime,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def start_runtime(
self,
request: Union[managed_service.StartRuntimeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Starts a Managed Notebook Runtime.
Perform "Start" on GPU instances; "Resume" on CPU
        instances. See:
https://cloud.google.com/compute/docs/instances/stop-start-instance
https://cloud.google.com/compute/docs/instances/suspend-resume-instance
.. code-block:: python
from google.cloud import notebooks_v1
def sample_start_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.StartRuntimeRequest(
name="name_value",
)
# Make the request
operation = client.start_runtime(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.StartRuntimeRequest, dict]):
The request object. Request for starting a Managed
Notebook Runtime.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1.types.Runtime` The
definition of a Runtime for a managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.StartRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.start_runtime,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
runtime.Runtime,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def stop_runtime(
self,
request: Union[managed_service.StopRuntimeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Stops a Managed Notebook Runtime.
Perform "Stop" on GPU instances; "Suspend" on CPU
        instances. See:
https://cloud.google.com/compute/docs/instances/stop-start-instance
https://cloud.google.com/compute/docs/instances/suspend-resume-instance
.. code-block:: python
from google.cloud import notebooks_v1
def sample_stop_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.StopRuntimeRequest(
name="name_value",
)
# Make the request
operation = client.stop_runtime(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.StopRuntimeRequest, dict]):
The request object. Request for stopping a Managed
Notebook Runtime.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1.types.Runtime` The
definition of a Runtime for a managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.StopRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.stop_runtime,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
runtime.Runtime,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def switch_runtime(
self,
request: Union[managed_service.SwitchRuntimeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Switch a Managed Notebook Runtime.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_switch_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.SwitchRuntimeRequest(
name="name_value",
)
# Make the request
operation = client.switch_runtime(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.SwitchRuntimeRequest, dict]):
The request object. Request for switching a Managed
Notebook Runtime.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1.types.Runtime` The
definition of a Runtime for a managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.SwitchRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.switch_runtime,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
runtime.Runtime,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def reset_runtime(
self,
request: Union[managed_service.ResetRuntimeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Resets a Managed Notebook Runtime.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_reset_runtime():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.ResetRuntimeRequest(
name="name_value",
)
# Make the request
operation = client.reset_runtime(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.ResetRuntimeRequest, dict]):
                The request object. Request for resetting a Managed
Notebook Runtime.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1.types.Runtime` The
definition of a Runtime for a managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.ResetRuntimeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.reset_runtime,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
runtime.Runtime,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def report_runtime_event(
self,
request: Union[managed_service.ReportRuntimeEventRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Report and process a runtime event.
.. code-block:: python
from google.cloud import notebooks_v1
def sample_report_runtime_event():
# Create a client
client = notebooks_v1.ManagedNotebookServiceClient()
# Initialize request argument(s)
request = notebooks_v1.ReportRuntimeEventRequest(
name="name_value",
vm_id="vm_id_value",
)
# Make the request
operation = client.report_runtime_event(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.notebooks_v1.types.ReportRuntimeEventRequest, dict]):
The request object. Request for reporting a Managed
Notebook Event.
name (:class:`str`):
Required. Format:
``projects/{project_id}/locations/{location}/runtimes/{runtime_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1.types.Runtime` The
definition of a Runtime for a managed notebook instance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = managed_service.ReportRuntimeEventRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.report_runtime_event,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
runtime.Runtime,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-notebooks",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ManagedNotebookServiceAsyncClient",)
|
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualize features as a graph.
This module visualizes relationships among features (of an ML model) as a graph.
The input can be any similarity matrix X (N by N) where each row/column
corresponds to a feature, and the value of X_ij corresponds to a similarity score
between feature i and feature j. An edge will be added between feature i and
feature j as long as the value of X_ij exceeds the specified threshold.
"""
from typing import Dict, List, Optional, Tuple
from absl import logging
import matplotlib.cm
import networkx as nx
import numpy as np
import pandas as pd
from plotly import graph_objects as go
CMAP_TYPE = matplotlib.colors.LinearSegmentedColormap
def cluster_to_sim(clusters: Dict[str, int]) -> pd.DataFrame:
"""Convert a clustering result to a similarity matrix.
Returns an N (number of features) by N matrix X where both rows and columns
correspond to the features, and X_ij = 1 if feature i and feature j are in
the same cluster, otherwise X_ij = 0.
Args:
clusters (Dict): {feature_name: cluster_ID} of length N (number of
features).
Returns:
An N by N similarity matrix.
"""
features = clusters.keys()
sim_matrix = pd.DataFrame([[0] * len(features)] * len(features))
sim_matrix.columns = features
sim_matrix.index = features
for f1 in sim_matrix.columns:
for f2 in sim_matrix.index:
if clusters[f1] == clusters[f2]:
sim_matrix.loc[f1, f2] = 1
return sim_matrix
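# Illustrative sketch (not part of the original module): cluster_to_sim turns a
# clustering assignment into a block-structured similarity matrix.
def _example_cluster_to_sim() -> pd.DataFrame:
  """Features 'a' and 'b' share cluster 0; 'c' sits alone in cluster 1."""
  sim = cluster_to_sim({'a': 0, 'b': 0, 'c': 1})
  assert sim.loc['a', 'b'] == 1 and sim.loc['a', 'c'] == 0
  return sim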
def customized_discrete_colorscale(
pl_entries: int = 10,
cmap: CMAP_TYPE = matplotlib.cm.get_cmap('hsv')
) -> List[Tuple[np.float64, str]]:
"""Converts a matplotlib colormap to a discrete color scale.
Args:
pl_entries: number of discrete values to assign colors to.
    cmap: a matplotlib colormap (LinearSegmentedColormap) object. Defaults to
      matplotlib.cm.get_cmap('hsv'); other values raise ValueError.
Returns:
    A discrete color scale as a list of (location on scale, color) tuples, e.g.
    [(0.1, 'rgb(255, 0, 0)'), (0.5, 'rgb(255, 147, 0)')].
"""
if not isinstance(cmap, CMAP_TYPE):
raise ValueError('Please provide a matplotlib colormap object for cmap.')
h = 1.0/(pl_entries)
pl_colorscale = []
c_order = h * np.arange(pl_entries+1)
for i in range(pl_entries):
cs = list(map(np.uint8, np.array(cmap(c_order[i])[:3]) * 255))
pl_colorscale.append(
(c_order[i], 'rgb({},{},{})'.format(*cs)))
# To have clear boundaries between colors in the colorbar
if i < (pl_entries):
pl_colorscale.append((c_order[i + 1],
'rgb({},{},{})'.format(*cs)))
return pl_colorscale
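# Illustrative sketch (not part of the original module): build a three-color
# discrete scale; each color appears at two positions so that boundaries in the
# colorbar stay sharp, as described above.
def _example_discrete_colorscale() -> List[Tuple[np.float64, str]]:
  scale = customized_discrete_colorscale(pl_entries=3)
  # scale covers [0, 1] with (position, 'rgb(r,g,b)') tuples.
  return scale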
def plot_graph(graph: nx.Graph,
title: str,
edge_colors: Optional[List[str]] = None,
edge_widths: Optional[List[float]] = None,
color_map: str = 'hsv') -> go.Figure:
"""Visualize a graph.
Hover over each node to see the variable name and the number of edges, i.e.
number of nodes connected to it.
Node size is proportional to the number of edges.
Nodes are colored based on the number of edges.
Edge colors and edge widths can be customized.
Args:
graph: a graph to be visualized.
title: the title of the graph.
edge_colors: a list of colors for the edges.
When None, use 'red' for all edges.
edge_widths: a list of widths for the edges.
When None, use 0.5 for all edges.
color_map: the type of colormap (passed through to matplotlib.cm.get_cmap).
Returns:
fig: a plotly figure object.
"""
pos = nx.spring_layout(graph)
nx.set_node_attributes(graph, pos, 'pos')
# Plot the edges
if edge_colors is None:
edge_colors = ['red'] * len(graph.edges())
if edge_widths is None:
edge_widths = [0.5] * len(graph.edges())
edge_trace = []
for edge, c, w in zip(graph.edges(), edge_colors, edge_widths):
x0, y0 = graph.nodes[edge[0]]['pos']
x1, y1 = graph.nodes[edge[1]]['pos']
edge_trace.append(
go.Scatter(
x=[x0, x1, None],
y=[y0, y1, None],
line=dict(width=w, color=c),
hoverinfo='none',
mode='lines'))
# Plot the nodes
node_x = []
node_y = []
for node in graph.nodes():
x, y = graph.nodes[node]['pos']
node_x.append(x)
node_y.append(y)
node_adjacencies = []
node_text = []
for node, adjacencies in graph.adjacency():
node_adjacencies.append(len(adjacencies))
node_text.append('{}, {} edges'.format(node, len(adjacencies)))
max_degree = max(node_adjacencies)
try:
cmap = matplotlib.cm.get_cmap(color_map)
except ValueError:
logging.error('Color map %s is not found!', color_map)
return None
custom_colorscale = customized_discrete_colorscale(max_degree, cmap)
node_trace = go.Scatter(
x=node_x, y=node_y,
mode='markers',
hoverinfo='text',
marker=dict(
showscale=True,
colorscale=custom_colorscale,
reversescale=True,
color=node_adjacencies,
size=[5*a for a in node_adjacencies],
colorbar=dict(
thickness=15,
title='Number of edges',
xanchor='left',
titleside='right',
nticks=max_degree,
),
line_width=1,
line_color='black'),
text=node_text)
# Generate the plot
fig = go.Figure(
data=edge_trace + [node_trace],
layout=go.Layout(
title=title,
titlefont_size=16,
showlegend=False,
hovermode='closest',
autosize=False,
width=600,
height=600,
))
fig.update_layout(xaxis=dict(showgrid=False,
zeroline=False,
showticklabels=False),
yaxis=dict(showgrid=False,
zeroline=False,
showticklabels=False))
return fig
def feature_graph_visualization(sim_matrix: pd.DataFrame,
threshold: float = 0.5,
color_map: str = 'hsv') -> go.Figure:
"""Visualize features as a graph.
Hover over each node to see the variable name and the number of edges, i.e.
number of nodes connected to it with a similarity value above the threshold.
Node size is proportional to the number of edges.
Nodes are colored based on the number of edges.
Edges are color-coded based on the sign of similarity (red for positive, blue
for negative).
Args:
sim_matrix: An N * N dataframe containing pairwise
similarity values among features, where N is the number of features.
threshold: Default to 0.5. The threshold for the absolute
value of similarity.
An edge is only visualized if the nodes it connects have a similarity
with an absolute value above this threshold.
color_map: the type of colormap.
Returns:
fig: a plotly figure object containing information to visualize the input
graph (that can be plotted to output using fig.show()).
"""
# TODO(): add a check for matrix being positive semidefinite
if sum(sim_matrix.columns != sim_matrix.index) != 0:
raise ValueError(
'Columns and rows of the similarity matrix are not in the same order!')
features = sim_matrix.columns
inds = np.argwhere(abs(np.tril(np.array(sim_matrix), -1)) > threshold)
linked_features = [(features[i1], features[i2]) for i1, i2 in inds]
# Create graph
feature_graph = nx.Graph()
feature_graph.add_edges_from(linked_features)
pos = nx.spring_layout(feature_graph)
nx.set_node_attributes(feature_graph, pos, 'pos')
title = f'Feature cluster (similarity threshold = {threshold})'
edge_colors = ['red' if sim_matrix.loc[edge_start, edge_end] > 0
else 'blue' for edge_start, edge_end in feature_graph.edges()]
edge_widths = [abs(sim_matrix.loc[edge_start, edge_end]) * 5
for edge_start, edge_end in feature_graph.edges()]
# Plot the graph
fig = plot_graph(feature_graph, title, edge_colors, edge_widths, color_map)
return fig
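# Illustrative sketch (not part of the original module): a tiny similarity matrix
# for three hypothetical features. Call fig.show() on the result to render it.
def _example_feature_graph_visualization() -> go.Figure:
  sim = pd.DataFrame(
      [[1.0, 0.8, -0.6],
       [0.8, 1.0, 0.1],
       [-0.6, 0.1, 1.0]],
      columns=['f1', 'f2', 'f3'],
      index=['f1', 'f2', 'f3'])
  # With threshold=0.5, only the f1-f2 (red) and f1-f3 (blue) edges are drawn.
  return feature_graph_visualization(sim, threshold=0.5)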
def initialize_colab_import() -> None:
"""Initialize colab import by plotting a random geometric graph.
This is called within an adhoc_import context to touch all required
dependencies for this module when imported into a colab notebook.
"""
graph = nx.random_geometric_graph(10, 1)
plot_graph(graph, 'Random Geometric Graph')
|
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating actual decoders based on parsed table
representations.
Note: We define a notion of "actual" and "baseline" classes to
distinguish how they are used.
"baseline" instruction decoders are defined for each instruction that
appears in the manual. The "baseline" decoder captures the properties
of the instruction through the corresponding set of defined virtuals
(as defined in "inst_classes.h"). "baseline" instruction decoders are
used to test that individual instructions are decoded as expected.
"actual" instruction decoders are merged "baseline" instruction
decoders, and are used in the production instruction decoder. The
main purpose of merging instruction decoders into corresponding actual
instruction decoders is to minimize production code size, by
minimizing the number of implementations that must be used by the
production instruction decoder.
To verify that actual and baseline instruction decoders are
equivalent, we test all possible instruction patterns and verify that
the baseline and actual instruction decoders behave the same way
(modulo a concept on whether we cared if condition codes were
assigned).
The main problem with the initial implementation of baseline and
actual instruction decoders is that all are hand written, and the
relationship between the baseline and corresponding actual decoders is
also determined by hand.
To simplify the maintenance of actual instruction decoders, this file
merges baseline classes, based on having the same set of defined
virtual functions (as defined by the corresponding fields in the
decoder tables within file "armv7.table"). The advantage of using this
notion is that changes only need be defined in "armv7.table", and the
generator will figure out how baseline classes are merged (rather than
in the old system where everything was updated by hand).
Verification of the new actual decoders will be the same as before.
We test all possible instruction patterns and verify that the baseline
and actual instruction decoders behave the same way.
"""
import dgen_core
import dgen_decoder
import dgen_output
# Holds the decoder that actuals are defined on.
ACTUAL_DECODER = None
# Holds the map from baseline decoder to the corresponding
# actual decoder.
BASELINE_TO_ACTUAL_MAP = {}
# Holds the map from an actual decoder to the corresponding
# (sorted) set of baseline decoders.
ACTUAL_TO_BASELINE_MAP = {}
# Holds the map from baseline decoder name, to the corresponding
# (sorted) list of baseline decoders with that name.
BASELINE_NAME_TO_BASELINE_MAP = {}
# Holds the map from an actual, to the name we will use for it.
ACTUAL_TO_NAME_MAP = {}
def GetActualDecoders(decoder):
"""Takes the given decoder table, and builds the corresponding
internal maps, so that we can consistently name actual classes.
Returns the (sorted) list of actual decoders to build.
"""
global ACTUAL_DECODER
# Verify whether actual classes have already been recorded.
if ACTUAL_DECODER:
raise Exception("GetActualDecoders: Multiple decoders not allowed.")
ACTUAL_DECODER = decoder
actuals = set()
# Get the list of decoder (actions) defined in the decoder table.
for baseline in decoder.decoders():
if not dgen_decoder.ActionDefinesDecoder(baseline): continue
actual = dgen_decoder.BaselineToActual(baseline)
actuals.add(actual)
_AddBaselineToBaselineNameToBaselineMap(baseline)
_AddToBaselineToActualMap(baseline, actual)
_AddToActualToBaselineMap(baseline, actual)
_FixActualToBaselineMap()
_FixBaselineNameToBaselineMap()
_DefineActualNames(actuals)
return sorted(actuals, key=ActualName)
def _DefineActualNames(actuals):
"""Installs a unique name for each actual, based on the baseline decoders
associated with it.
"""
global ACTUAL_TO_NAME_MAP
name_map = {}
for actual in sorted(actuals):
bases = ACTUAL_TO_BASELINE_MAP[actual]
name = 'Unnamed'
if bases:
baseline = bases[0]
name = dgen_decoder.BaselineName(baseline)
count = name_map.get(name)
if count == None:
count = 1
actual_name = 'Actual_%s_case_%s' % (name, count)
name_map[name] = count + 1
ACTUAL_TO_NAME_MAP[actual] = actual_name
def ActualName(actual):
"""Returns the name to use for the actual."""
return ACTUAL_TO_NAME_MAP[actual]
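# Illustration (hypothetical decoder names): if two distinct baseline decoders both
# have the name 'LoadStore' but merge into different actuals, _DefineActualNames()
# names those actuals 'Actual_LoadStore_case_1' and 'Actual_LoadStore_case_2',
# numbering them in sorted order of the actuals.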
def AddAutoActualsToDecoder(decoder, tables):
"""Adds the automatically generated class decoders (in files
"*_actuals.h" and "*_actuals.cc" as the 'actual' class decoder to
the listed tables, and returns the generated (new) decoder.
"""
if not tables: return decoder
GetActualDecoders(decoder)
return decoder.table_filter(
lambda tbl: _AddActualToTable(tbl) if tbl.name in tables
else tbl.copy())
def _AddActualToTable(table):
"""Generates a copy of the given table, where the 'actual' field is
defined by the corresponding (actual) class decoder described in
the table."""
return table.row_filter(_AddActualToRow)
def _AddActualToRow(r):
"""Generates a copy of the given row, where (if applicable), the
'actual' field is defined by the corresponding (actual) class
decoder described in the table."""
patterns = list(r.patterns)
action = r.action.copy()
if (isinstance(action, dgen_core.DecoderAction) and
dgen_decoder.ActionDefinesDecoder(action)):
actual = dgen_decoder.BaselineToActual(action)
action.define('actual', ActualName(actual))
row = dgen_core.Row(patterns, action)
return row
ACTUAL_BASE_H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
// Note: actual decoders are spread out over multiple files so that
// they are small enough to be handled by the Rietveld server.
"""
ACTUAL_BASE_INCLUDE=(
"""#include "%(FILEBASE)s_%(filename_index)s.h"
""")
ACTUAL_BASE_H_FOOTER="""
#endif // %(IFDEF_NAME)s
"""
def generate_actuals_base_h(decoder, decoder_name,
filename, out, cl_args):
"""Generates actual decoder C++ declarations in the given file.
Args:
decoder: The decoder tables.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
if not decoder.primary: raise Exception('No tables provided.')
num_blocks = dgen_output.GetNumberCodeBlocks(cl_args['auto-actual-sep'])
assert filename.endswith('actuals.h')
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILEBASE' : filename[:-len('.h')],
'decoder_name': decoder_name,
}
out.write(ACTUAL_BASE_H_HEADER % values)
for block in range(1, num_blocks+1):
values['filename_index'] = block
out.write(ACTUAL_BASE_INCLUDE % values)
out.write(ACTUAL_BASE_H_FOOTER % values)
ACTUAL_H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/inst_classes.h"
#include "native_client/src/trusted/validator_arm/arm_helpers.h"
namespace nacl_arm_dec {
"""
ACTUAL_H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_actuals_h(decoder, decoder_name, filename, out, cl_args):
"""Generates actual decoder C++ declarations in the given file.
Args:
decoder: The decoder tables.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
if not decoder.primary: raise Exception('No tables provided.')
separators = cl_args['auto-actual-sep']
num_blocks = dgen_output.GetNumberCodeBlocks(separators)
# Find block to print
block = dgen_output.FindBlockIndex(filename, 'actuals_%s.h', num_blocks)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'decoder_name': decoder_name,
}
out.write(ACTUAL_H_HEADER % values)
_print_actual_headers(
GetActualDecodersBlock(decoder, block, separators), out)
out.write(ACTUAL_H_FOOTER % values)
ACTUAL_CC_HEADER="""%(FILE_HEADER)s
#include "native_client/src/trusted/validator_arm/inst_classes.h"
#include "native_client/src/trusted/validator_arm/gen/arm32_decode_actuals.h"
namespace nacl_arm_dec {
"""
ACTUAL_CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_actuals_cc(decoder, decoder_name, filename, out, cl_args):
"""Generates the actual decoder C++ definitions in the given file.
Args:
decoder: The decoder tables.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
if not decoder.primary: raise Exception('No tables provided.')
separators = cl_args['auto-actual-sep']
num_blocks = dgen_output.GetNumberCodeBlocks(separators)
# Find block to print
block = dgen_output.FindBlockIndex(filename, 'actuals_%s.cc', num_blocks)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'decoder_name': decoder_name,
}
out.write(ACTUAL_CC_HEADER % values)
_print_actual_classes(
GetActualDecodersBlock(decoder, block, separators), out)
out.write(ACTUAL_CC_FOOTER % values)
ACTUAL_CLASS_HEADER="""
// %(decoder_name)s
//
// Actual:
// %(actual_rep)s"""
ACTUAL_CLASS_REP="""
//
// Baseline:
// %(baseline_rep)s"""
def _print_actual_headers(actuals, out):
"""Generates C++ class declarations for each of the given actual decoders."""
for actual in actuals:
actual_name = ActualName(actual)
values = {
'decoder_name': actual_name,
'actual_rep': dgen_decoder.commented_decoder_neutral_repr(actual)
}
out.write(ACTUAL_CLASS_HEADER % values)
for baseline in ACTUAL_TO_BASELINE_MAP[actual]:
values['baseline_rep'] = (
dgen_decoder.commented_decoder_repr(baseline))
out.write(ACTUAL_CLASS_REP % values)
dgen_decoder.DeclareDecoder(actual, actual_name, out)
ACTUAL_CLASS_DEF_HEADER="""
// %(decoder_name)s
//
// Actual:
// %(actual_rep)s
"""
def _print_actual_classes(actuals, out):
"""Generates C++ class definitions for each of the given actual decoders."""
for actual in actuals:
actual_name = ActualName(actual)
values = {
'decoder_name': actual_name,
'actual_rep': dgen_decoder.commented_decoder_neutral_repr(actual),
}
out.write(ACTUAL_CLASS_DEF_HEADER % values)
dgen_decoder.DefineDecoder(actual, actual_name, out)
def _AddBaselineToBaselineNameToBaselineMap(baseline):
"""Add entry into BASELINE_NAME_TO_BASELINE_NAME_MAP for the
given baseline class."""
baseline_name = dgen_decoder.BaselineName(baseline)
bases = BASELINE_NAME_TO_BASELINE_MAP.get(baseline_name)
if bases == None:
bases = set()
BASELINE_NAME_TO_BASELINE_MAP[baseline_name] = bases
bases.add(baseline)
def _FixBaselineNameToBaselineMap():
"""Replaces the sets in BASELINE_NAME_TO_BASELINE_MAP with
corresponding sorted lists."""
for baseline_name in BASELINE_NAME_TO_BASELINE_MAP.keys():
BASELINE_NAME_TO_BASELINE_MAP[baseline_name] = sorted(
BASELINE_NAME_TO_BASELINE_MAP[baseline_name])
def _AddToBaselineToActualMap(baseline, actual):
"""Add given entry to BASELINE_TO_ACTUAL_MAP."""
BASELINE_TO_ACTUAL_MAP[baseline] = actual
def _AddToActualToBaselineMap(baseline, actual):
"""Add given entry to ACTUAL_TO_BASELINE_MAP."""
bases = ACTUAL_TO_BASELINE_MAP.get(actual)
if bases == None:
bases = set()
ACTUAL_TO_BASELINE_MAP[actual] = bases
bases.add(baseline)
def _FixActualToBaselineMap():
"""Replace the sets in ACTUAL_TO_BASELINE_MAP with corresponding
sorted lists."""
for actual in ACTUAL_TO_BASELINE_MAP.keys():
ACTUAL_TO_BASELINE_MAP[actual] = sorted(
ACTUAL_TO_BASELINE_MAP[actual],
key=dgen_decoder.BaselineNameAndBaseline)
def GetActualDecodersBlock(decoder, n, separators):
"""Returns the (sorted) list of actual classes to include
in block n, assuming actual classes are split using
the list of separators."""
return dgen_output.GetDecodersBlock(n, separators,
GetActualDecoders(decoder),
_ActualNameLessActualPrefix)
def _ActualNameLessActualPrefix(decoder):
name = ActualName(decoder)
if name.startswith('Actual_'):
return name[len('Actual_'):]
else:
return name
|
|
"""Handle network addresses and ranges
Tools for validating, parsing, and comparing network addresses, ranges,
and for querying whether a given address is within a set of ranges.
Address is an abstract class, of which IPv4 and IPv6 are subclasses,
which builds on top of the socket parsing of network addresses and
represents addresses directly as their integer values. IP is the
direct superclass of IPv4 and IPv6, which accepts valid addresses for
either class, preferring IPv4 in ambiguous cases.
AddressRange is a general construct for specifying a contiguous block
of addresses, and does not connote a structure. Subnet adds CIDR
structure. AddressRanges are addable, and Subnets devolve into
AddressRanges for this purpose if there isn't a trivial overlap.
AddrList replicates much of the behavior of John Hoffman's IP_List
data structures, if more simply. Ranges are stored in a strict
ordering, and addition of a new range will combine any now-contiguous
ranges.
"""
import socket
import bisect
import operator
from functools import reduce
class Address(int):
"""Unsigned integer representations of network addresses, building on the
socket library.
Subclass with number of bits and address family."""
bits = None
family = None
def __new__(cls, val=0):
"""Convert a number or a string to an Address."""
if cls.bits is None or cls.family is None:
raise NotImplementedError(
"Do not call {!s}() directly".format(cls.__name__))
if isinstance(val, str):
if val.find(':') < 0:
try:
val = socket.gethostbyname(val)
except socket.gaierror:
pass
try:
return cls.from_bytes(socket.inet_pton(cls.family, val), 'big')
except OSError:
raise ValueError("invalid literal for {}(): {!r}".format(
cls.__name__, val))
address = super(Address, cls).__new__(cls, val)
if address < 0:
raise OverflowError("can't convert negative int to {}".format(
cls.__name__))
if address.bit_length() > cls.bits:
raise OverflowError("too large a value for {}: {!s}".format(
cls.__name__, val))
return address
def __str__(self):
"""Use socket library formatting"""
return socket.inet_ntop(self.family,
self.to_bytes(self.bits // 8, 'big'))
def mask(self, nbits):
"""Return an address with the first n bits preserved and the
        rest zeroed out."""
ones = (1 << self.bits) - 1
return self.__class__(self & (ones << (self.bits - nbits)))
class IP(Address):
"""Generic IP address
IP() == IPv4('0.0.0.0')
IP('::') == IPv6('::')
Enables conversion between IP classes:
IP().to(IPv6) == IPv6('::ffff:0:0')
IP('::ffff:0:0').to(IPv4) == IPv4('0.0.0.0')
"""
v4mask = 0xffff00000000
def __new__(cls, val=0):
if cls.family is None:
for subclass in cls.subclasses:
try:
return subclass(val)
except (ValueError, OverflowError):
pass
raise ValueError('Invalid address: {}'.format(val))
return super(IP, cls).__new__(cls, val)
def to(self, cls): # pylint: disable=invalid-name
"""Convert between IP classes, if possible.
IPv4('w.x.y.z').to(IPv6) == IPv6('::ffff:w.x.y.z')
IPv6('::ffff:w.x.y.z').to(IPv4) == IPv4('w.x.y.z')
"""
if isinstance(self, cls):
return self
try:
return cls(self.convert[type(self)][cls](self))
except (KeyError, OverflowError):
raise ValueError("not convertible to {}".format(cls.__name__))
class IPv4(IP):
"""Integer representation of IPv4 network addresses, building on the
socket library."""
bits = 32
family = socket.AF_INET
class IPv6(IP):
"""Integer representation of IPv6 network addresses, building on the
socket library."""
bits = 128
family = socket.AF_INET6
IP.subclasses = (IPv4, IPv6)
IP.convert = {IPv4: {IPv6: lambda x: x | IP.v4mask},
IPv6: {IPv4: lambda x: x ^ IP.v4mask}}
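# Illustrative sketch (not part of the original module): parsing and converting
# addresses. IP() prefers IPv4 when a literal is ambiguous, per the module docstring.
def _example_ip_conversion():
    addr = IP('192.168.0.1')       # parsed as an IPv4 instance
    mapped = addr.to(IPv6)         # numerically equal to IPv6('::ffff:192.168.0.1')
    assert mapped.to(IPv4) == addr
    return mapped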
class AddressRange(object): # pylint: disable=R0903
"""Range within a given address family that allows unions, comparisons,
and checks for inclusion.
Strict greater/less-than comparisons are True when two ranges cannot be
combined because there is at least one address separating the two. This
allows a new range to be quickly inserted into a sorted list of ranges,
combining when possible.
"""
def __init__(self, start, end=None):
"""Create range of from start to end, or lift address into a
range, if no end."""
if end is None:
end = start
self.family = type(start)
self.setrange(start, end)
def setrange(self, start, end):
"""Set AddressRange bounds"""
assert isinstance(start, self.family)
assert isinstance(end, self.family)
assert start <= end
self._start, self._end = start, end
@property
def start(self):
"""First IP in range"""
return self._start
@start.setter
def start(self, val):
"""Set first IP in range"""
self.setrange(val, self._end)
@property
def end(self):
"""Last IP in range"""
return self._end
@end.setter
def end(self, val):
"""Set last IP in range"""
self.setrange(self._start, val)
def __str__(self):
return '{}-{}'.format(self._start, self._end)
def __contains__(self, addr):
if isinstance(addr, AddressRange):
return self._start <= addr.start and addr.end <= self._end
return self._start <= addr <= self._end
def __add__(self, addr):
if not isinstance(addr, AddressRange):
addr = AddressRange(addr)
if self < addr:
return (self, addr)
elif self > addr:
return (addr, self)
elif addr in self:
return self
elif self in addr:
return addr
else:
return AddressRange(min(self._start, addr.start),
max(self._end, addr.end))
def __lt__(self, addr):
"""True if there is at least one address above the range and below x"""
if isinstance(addr, AddressRange):
addr = addr.start
return self._end + 1 < addr
def __gt__(self, addr):
"""True if there is at least one address below the range and above x"""
if isinstance(addr, AddressRange):
addr = addr.end
return self._start > addr + 1
def __eq__(self, addr):
return self._start == addr.start and self._end == addr.end
@classmethod
def from_string(cls, iprange):
"""Parse address range of the form start-end"""
start, _, end = iprange.partition('-')
startip = IP(start)
if end:
endip = IP(end)
assert startip.bits == endip.bits
else:
endip = None
return cls(startip, endip)
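# Illustrative sketch (not part of the original module): adding two ranges merges
# them when they touch or overlap, and returns a sorted tuple when they do not.
def _example_addressrange_addition():
    a = AddressRange.from_string('10.0.0.0-10.0.0.9')
    b = AddressRange.from_string('10.0.0.10-10.0.0.20')
    merged = a + b                 # contiguous, so a single AddressRange
    assert IPv4('10.0.0.15') in merged
    disjoint = a + AddressRange.from_string('10.0.1.0-10.0.1.5')
    return merged, disjoint        # disjoint is a (low, high) tuple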
class Subnet(AddressRange):
"""Address range that operates on the logic of CIDR blocks.
If addition of new addresses breaks this logic, revert to AddressRange."""
def __init__(self, address, cidr):
self.address = address.mask(cidr)
self.cidr = cidr
start = self.address
diff = (1 << (address.bits - cidr)) - 1
end = address.__class__(start + diff)
super(Subnet, self).__init__(start, end)
def __str__(self):
return '{}/{:d}'.format(self.address, self.cidr)
def __add__(self, addr):
"""If a Subnet subsumes another range, keep the larger Subnet. If not,
revert to AddressRange addition."""
if addr in self:
return self
elif self in addr:
return addr
else:
return super(Subnet, self).__add__(addr)
@classmethod
def from_string(cls, netstring):
"""Parse CIDR string of the form IP/CIDR"""
ipstring, _, cidr = netstring.partition('/')
addr = IP(ipstring)
return cls(addr, int(cidr) if cidr else addr.bits)
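# Illustrative sketch (not part of the original module): Subnet keeps its CIDR
# representation while behaving like an AddressRange for containment tests.
def _example_subnet():
    net = Subnet.from_string('192.168.0.0/24')
    assert IPv4('192.168.0.42') in net
    assert str(net) == '192.168.0.0/24'
    return net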
class AddrList(object):
"""Collection of addresses with no constraints on contiguity,
featuring insertion functions and inclusion tests."""
def __init__(self):
self.ranges = {IPv4: [], IPv6: []}
def add_ip(self, addr):
"""Insert individual address string into list"""
self.add_addressrange(AddressRange(IP(addr)))
def add_subnet(self, subnet):
"""Insert CIDR block string into list"""
self.add_addressrange(Subnet.from_string(subnet))
def add_range(self, iprange):
"""Insert contiguous address range string into list"""
self.add_addressrange(AddressRange.from_string(iprange))
def add_addressrange(self, iprange):
"""Insert AddressRange into list, combining overlapping list
elements into a minimal set of ranges."""
ranges = self.ranges[iprange.family]
left = bisect.bisect_left(ranges, iprange)
right = bisect.bisect_right(ranges, iprange)
newseg = reduce(operator.add, ranges[left:right], iprange)
ranges[left:right] = [newseg]
def __contains__(self, address):
if not isinstance(address, IP):
address = IP(address)
return any(address in r for r in self.ranges[type(address)])
def set_intranet_addresses(self):
"""Add addresses corresponding to reserved instranet blocks"""
self.add_subnet('127.0.0.1/8')
self.add_subnet('10.0.0.0/8')
self.add_subnet('172.16.0.0/12')
self.add_subnet('192.168.0.0/16')
self.add_subnet('169.254.0.0/16')
self.add_ip('::1')
self.add_subnet('fe80::/16')
self.add_subnet('fec0::/16')
def set_ipv4_addresses(self):
"""Add the block of IPv4 addresses in the IPv6 space"""
self.add_subnet('::ffff:0:0/96')
def read_fieldlist(self, filename):
"""Read a list from a file in the format 'ip[/len] <whatever>'
Leading whitespace is ignored, as are lines beginning with '#'
"""
with open(filename, 'r') as fieldlistfile:
for line in fieldlistfile:
fields = line.split()
if not fields or fields[0][0] == '#':
continue
try:
self.add_subnet(fields[0])
except ValueError:
print('*** WARNING *** could not parse IP range: ', line)
def read_rangelist(self, filename):
"""Read a list from a file in the format 'whatever:whatever:ip[-ip]
(not IPv6 compatible at all)"""
with open(filename, 'r') as rangelistfile:
for line in rangelistfile:
fields = line.split()
if not fields or fields[0][0] == '#':
continue
try:
self.add_range(fields[0].split(':')[-1])
except ValueError:
print('*** WARNING *** could not parse IP range: ', line)
def to_ipv4(addr):
"""Convert IP string to IPv4 string"""
return str(IP(addr).to(IPv4))
def is_valid_ip(addr):
"""Test if string is valid IPv4 or IPv6"""
try:
IP(addr)
return True
except (ValueError, OverflowError, TypeError):
return False
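# Illustrative sketch (not part of the original module): an AddrList merges
# overlapping or adjacent ranges into a minimal ordered set and answers membership
# questions for both address families.
def _example_addrlist():
    acl = AddrList()
    acl.set_intranet_addresses()
    acl.add_range('203.0.113.0-203.0.113.255')
    assert '10.1.2.3' in acl
    assert '8.8.8.8' not in acl
    assert is_valid_ip('2001:db8::1')
    return acl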
|
|
#!/usr/bin/env python
# Copyright (c) 2013, 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Parse a JSON-format manifest configuration file and
provide the specific fields, which have to be integrated with the
packaging tool (e.g. make_apk.py) to generate an XML-format manifest file.
Sample usage from shell script:
python manifest_json_parser.py --jsonfile=/path/to/manifest.json
"""
import json
import optparse
import os
import re
import sys
def HandlePermissionList(permission_list):
"""This function is used to handle the permission list and return the string
of permissions.
Args:
permission_list: the permission list, e.g.["permission1", "permission2"].
Returns:
The string of permissions with ':' as separator.
e.g. "permission1:permission2".
"""
permissions = list(permission_list)
reg_permission = re.compile(r'^[a-zA-Z\.]*$')
for permission in permissions:
if not reg_permission.match(permission):
print('\'Permissions\' field error, only alphabets and '
'\'.\' are allowed.')
sys.exit(1)
return ':'.join(permissions)
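# Illustrative sketch (not part of the original module): a well-formed permission
# list collapses to a ':'-joined string; entries with characters other than letters
# and '.' make the tool exit.
def _example_handle_permission_list():
  return HandlePermissionList(['Contacts', 'Geolocation'])  # 'Contacts:Geolocation'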
def ParseLaunchScreen(ret_dict, launch_screen_dict, orientation):
if orientation in launch_screen_dict:
sub_dict = launch_screen_dict[orientation]
if 'background_color' in sub_dict:
ret_dict['launch_screen_background_color_' + orientation] = (
sub_dict['background_color'])
if 'background_image' in sub_dict:
ret_dict['launch_screen_background_image_' + orientation] = (
sub_dict['background_image'])
if 'image' in sub_dict:
ret_dict['launch_screen_image_' + orientation] = (
sub_dict['image'])
if 'image_border' in sub_dict:
ret_dict['launch_screen_image_border_' + orientation] = (
sub_dict['image_border'])
def PrintDeprecationWarning(deprecated_items):
if len(deprecated_items) > 0:
print (' Warning: The following fields have been deprecated for '
'Crosswalk:\n %s' %
', '.join([str(item) for item in deprecated_items]))
print (' Please follow: https://www.crosswalk-project.org/#documentation/'
'manifest.')
class ManifestJsonParser(object):
""" The class is used to parse json-format manifest file, recompose the
fields and provide the field interfaces required by the packaging tool.
Args:
input_path: the full path of the json-format manifest file.
"""
def __init__(self, input_path):
self.input_path = input_path
input_file = open(self.input_path)
try:
input_src = input_file.read()
self.data_src = json.JSONDecoder().decode(input_src)
self.ret_dict = self._output_items()
except (TypeError, ValueError, IOError) as error:
print('There is a parser error in manifest.json file: %s' % error)
sys.exit(1)
except KeyError as error:
print('There is a field error in manifest.json file: %s' % error)
sys.exit(1)
finally:
input_file.close()
def _output_items(self):
""" The manifest field items are reorganized and returned as a
dictionary to support single or multiple values of keys.
Returns:
      A dictionary of the corresponding items. The dictionary keys are
      described as follows; a value is set to "" if the corresponding
      key is not set.
app_name: The application name.
version: The version number.
icons: An array of icons.
app_url: The url of application, e.g. hosted app.
      xwalk_apk_url: The download URL of the Crosswalk runtime library APK.
      description: The description of the application.
      app_root: The root path of the web app; this flag allows packaging a
        local web application as an APK.
      app_local_path: The relative path of the entry file based on app_root;
        this flag should be used together with "--app-root".
      permissions: The permission list.
      orientation: The default allowed orientations.
fullscreen: The fullscreen flag of the application.
launch_screen: The launch screen configuration.
"""
print ("Checking manifest file")
ret_dict = {}
deprecated_items = []
if 'name' not in self.data_src:
print('Error: no \'name\' field in manifest.json file.')
sys.exit(1)
ret_dict['app_name'] = self.data_src['name']
ret_dict['version'] = ''
if 'version' in self.data_src and 'xwalk_version' in self.data_src:
print('WARNING: the value in "version" will be ignored and support '
'for it will be removed in the future.')
ret_dict['version'] = self.data_src['xwalk_version']
elif 'xwalk_version' in self.data_src:
ret_dict['version'] = self.data_src['xwalk_version']
elif 'version' in self.data_src:
deprecated_items.append('version')
ret_dict['version'] = self.data_src['version']
if 'start_url' in self.data_src:
app_url = self.data_src['start_url']
elif 'launch_path' in self.data_src:
deprecated_items.append('launch_path')
app_url = self.data_src['launch_path']
elif ('app' in self.data_src and
'launch' in self.data_src['app'] and
'local_path' in self.data_src['app']['launch']):
deprecated_items.append('app.launch.local_path')
app_url = self.data_src['app']['launch']['local_path']
else:
app_url = ''
if app_url.lower().startswith(('http://', 'https://')):
app_local_path = ''
else:
app_local_path = app_url
app_url = ''
if 'xwalk_apk_url' in self.data_src:
ret_dict['xwalk_apk_url'] = self.data_src['xwalk_apk_url']
else:
ret_dict['xwalk_apk_url'] = ''
file_path_prefix = os.path.split(self.input_path)[0]
if 'icons' in self.data_src:
icons = self.data_src['icons']
if type(icons) == dict:
deprecated_items.append('icons defined as index:value')
ret_dict['icons'] = icons
elif type(icons) == list:
icons_dict = {}
for icon in icons:
if 'sizes' in icon and 'src' in icon:
icons_dict[icon['sizes'].split('x')[0]] = icon['src']
ret_dict['icons'] = icons_dict
else:
ret_dict['icons'] = {}
else:
ret_dict['icons'] = {}
app_root = file_path_prefix
ret_dict['description'] = ''
if 'description' in self.data_src and 'xwalk_description' in self.data_src:
print('WARNING: the value in "description" will be ignored and support '
'for it will be removed in the future.')
ret_dict['description'] = self.data_src['xwalk_description']
elif 'xwalk_description' in self.data_src:
ret_dict['description'] = self.data_src['xwalk_description']
elif 'description' in self.data_src:
deprecated_items.append('description')
ret_dict['description'] = self.data_src['description']
ret_dict['app_url'] = app_url
ret_dict['app_root'] = app_root
ret_dict['app_local_path'] = app_local_path
ret_dict['permissions'] = ''
if 'xwalk_permissions' in self.data_src:
try:
permission_list = self.data_src['xwalk_permissions']
ret_dict['permissions'] = HandlePermissionList(permission_list)
except (TypeError, ValueError, IOError):
print('\'Permissions\' field error in manifest.json file.')
sys.exit(1)
elif 'permissions' in self.data_src:
deprecated_items.append('permissions')
try:
permission_list = self.data_src['permissions']
ret_dict['permissions'] = HandlePermissionList(permission_list)
except (TypeError, ValueError, IOError):
print('\'Permissions\' field error in manifest.json file.')
sys.exit(1)
orientation = {'landscape':'landscape',
'landscape-primary':'landscape',
'landscape-secondary':'reverseLandscape',
'portrait':'portrait',
'portrait-primary':'portrait',
'portrait-secondary':'reversePortrait',
'any':'unspecified',
'natural':'unspecified'}
if 'orientation' in self.data_src:
if self.data_src['orientation'] in orientation:
ret_dict['orientation'] = orientation[self.data_src['orientation']]
else:
ret_dict['orientation'] = 'unspecified'
else:
ret_dict['orientation'] = 'unspecified'
if 'display' in self.data_src and 'fullscreen' in self.data_src['display']:
ret_dict['fullscreen'] = 'true'
else:
ret_dict['fullscreen'] = ''
if 'xwalk_launch_screen' in self.data_src:
launch_screen_dict = self.data_src['xwalk_launch_screen']
ParseLaunchScreen(ret_dict, launch_screen_dict, 'default')
ParseLaunchScreen(ret_dict, launch_screen_dict, 'portrait')
ParseLaunchScreen(ret_dict, launch_screen_dict, 'landscape')
elif 'launch_screen' in self.data_src:
deprecated_items.append('launch_screen')
launch_screen_dict = self.data_src['launch_screen']
ParseLaunchScreen(ret_dict, launch_screen_dict, 'default')
ParseLaunchScreen(ret_dict, launch_screen_dict, 'portrait')
ParseLaunchScreen(ret_dict, launch_screen_dict, 'landscape')
PrintDeprecationWarning(deprecated_items)
return ret_dict
def ShowItems(self):
"""Show the processed results, it is used for command-line
internal debugging."""
print("app_name: %s" % self.GetAppName())
print("version: %s" % self.GetVersion())
print("description: %s" % self.GetDescription())
print("icons: %s" % self.GetIcons())
print("app_url: %s" % self.GetAppUrl())
print("xwalk_apk_url: %s" % self.GetXWalkApkUrl())
print("app_root: %s" % self.GetAppRoot())
print("app_local_path: %s" % self.GetAppLocalPath())
print("permissions: %s" % self.GetPermissions())
print("orientation: %s" % self.GetOrientation())
print("fullscreen: %s" % self.GetFullScreenFlag())
print('launch_screen.default.background_color: %s' %
self.GetLaunchScreenBackgroundColor('default'))
print('launch_screen.default.background_image: %s' %
self.GetLaunchScreenBackgroundImage('default'))
print('launch_screen.default.image: %s' %
self.GetLaunchScreenImage('default'))
print('launch_screen.default.image_border: %s' %
self.GetLaunchScreenImageBorder('default'))
print('launch_screen.portrait.background_color: %s' %
self.GetLaunchScreenBackgroundColor('portrait'))
print('launch_screen.portrait.background_image: %s' %
self.GetLaunchScreenBackgroundImage('portrait'))
print('launch_screen.portrait.image: %s' %
self.GetLaunchScreenImage('portrait'))
print('launch_screen.portrait.image_border: %s' %
self.GetLaunchScreenImageBorder('portrait'))
print('launch_screen.landscape.background_color: %s' %
self.GetLaunchScreenBackgroundColor('landscape'))
print('launch_screen.landscape.background_image: %s' %
self.GetLaunchScreenBackgroundImage('landscape'))
print('launch_screen.landscape.image: %s' %
self.GetLaunchScreenImage('landscape'))
print('launch_screen.landscape.image_border: %s' %
self.GetLaunchScreenImageBorder('landscape'))
def GetAppName(self):
"""Return the application name."""
return self.ret_dict['app_name']
def GetVersion(self):
"""Return the version number."""
return self.ret_dict['version']
def GetIcons(self):
"""Return the icons."""
return self.ret_dict['icons']
def GetAppUrl(self):
"""Return the URL of the application."""
return self.ret_dict['app_url']
def GetXWalkApkUrl(self):
"""Return the download URL of the Crosswalk runtime library APK"""
return self.ret_dict['xwalk_apk_url']
def GetDescription(self):
"""Return the description of the application."""
return self.ret_dict['description']
def GetAppRoot(self):
"""Return the root path of the local web application."""
return self.ret_dict['app_root']
def GetAppLocalPath(self):
"""Return the local relative path of the local web application."""
return self.ret_dict['app_local_path']
def GetPermissions(self):
"""Return the permissions."""
return self.ret_dict['permissions']
def GetOrientation(self):
"""Return the default allowed orientations"""
return self.ret_dict['orientation']
def GetFullScreenFlag(self):
"""Return the set fullscreen flag of the application."""
return self.ret_dict['fullscreen']
def GetLaunchScreenBackgroundColor(self, orientation):
"""Return the background color for launch_screen."""
key = 'launch_screen_background_color_' + orientation
return self.ret_dict.get(key, '')
def GetLaunchScreenBackgroundImage(self, orientation):
"""Return the background image for launch_screen."""
key = 'launch_screen_background_image_' + orientation
return self.ret_dict.get(key, '')
def GetLaunchScreenImage(self, orientation):
"""Return the image for launch_screen."""
key = 'launch_screen_image_' + orientation
return self.ret_dict.get(key, '')
def GetLaunchScreenImageBorder(self, orientation):
"""Return the image border for launch_screen."""
key = 'launch_screen_image_border_' + orientation
return self.ret_dict.get(key, '')
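# Illustrative sketch (not part of the original module; the manifest contents and
# path are hypothetical): write a minimal manifest.json and read fields back.
def _example_manifest_json_parser(tmp_path):
  manifest = {'name': 'Demo App',
              'xwalk_version': '1.0.0',
              'start_url': 'index.html',
              'xwalk_permissions': ['Vibration']}
  with open(tmp_path, 'w') as out_file:
    json.dump(manifest, out_file)
  parser = ManifestJsonParser(tmp_path)
  return parser.GetAppName(), parser.GetVersion(), parser.GetAppLocalPath()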
def main(argv):
"""Respond to command mode and show the processed field values."""
parser = optparse.OptionParser()
info = ('The input json-format file name. Such as: '
'--jsonfile=manifest.json')
parser.add_option('-j', '--jsonfile', action='store', dest='jsonfile',
help=info)
opts, _ = parser.parse_args()
if len(argv) == 1:
parser.print_help()
return 0
json_parser = ManifestJsonParser(opts.jsonfile)
json_parser.ShowItems()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
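# Illustrative usage sketch (not part of the original tool): driving the parser
# defined above programmatically instead of through the command line. The
# manifest path is a placeholder; only getters shown above are used.
def _example_read_manifest(path='manifest.json'):
    parser = ManifestJsonParser(path)
    return {
        'name': parser.GetAppName(),
        'version': parser.GetVersion(),
        'fullscreen': parser.GetFullScreenFlag(),
        'portrait_image': parser.GetLaunchScreenImage('portrait'),
    }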
|
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
from itertools import combinations
import time
import logging
from licensedcode.whoosh_spans.spans import Span
from licensedcode import index
from licensedcode.models import get_all_rules
from textcode import analysis
logger = logging.getLogger(__name__)
# import sys
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
# logger.setLevel(logging.DEBUG)
# general debug flag
DEBUG = False
# debug flag for match filtering
DEBUG_FILTER = False
# debug flag for perfs: install pympler and set this to collect memory usage and other stats
DEBUG_PERF = False
def get_license_matches(location=None, minimum_score=100):
"""
Yield detected license matches in the file at `location`.
minimum_score is the minimum score threshold, from 0 to 100. The default of
100 means that only exact license matches are detected. With any value below
100, approximate license matches are also included. Note that the minimum
length for an approximate match is four words.
"""
return get_index().match(location, minimum_score=minimum_score)
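# Illustrative sketch (assumption: `location` is a path to an existing text
# file): collect (license key, rule identifier, score) triples from the matches
# yielded above, using only attributes defined in this module.
def _example_license_keys(location, minimum_score=90):
    results = []
    for match in get_license_matches(location, minimum_score=minimum_score):
        for license_key in match.rule.licenses:
            results.append((license_key, match.rule.identifier, match.score))
    return results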
def detect_license(location=None, minimum_score=100):
"""
DEPRECATED: legacy API
Yield detected licenses in the file at `location`. This is a
wrapper around LicenseIndex.match working on the full license index and
returning plain tuples as opposed to match objects.
An exception may be raised on error.
Directories yield nothing and are not walked for their containing files.
Use commoncode.fileutils.walk for walking a directory tree.
Note that for testing purposes, location can also be a list of lines.
minimum_score is the minimum score threshold, from 0 to 100.
"""
for match in get_license_matches(location, minimum_score=minimum_score):
# TODO: return one result per match with a license
# yielding the default license if provided
for detected_license in match.rule.licenses:
yield (detected_license,
match.query_position.start_line, match.query_position.end_line,
match.query_position.start_char, match.query_position.end_char,
match.rule.identifier,
match.score,)
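# Illustrative sketch of consuming the deprecated API above: each yielded item
# is the seven-tuple documented in detect_license and can be unpacked directly.
def _example_legacy_detection(location):
    lines = []
    for (key, start_line, end_line, start_char, end_char,
         rule_id, score) in detect_license(location):
        lines.append('%s (%s) at lines %s-%s, score %s'
                     % (key, rule_id, start_line, end_line, score))
    return lines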
# global in-memory cache of the license index
_LICENSES_INDEX = None
def get_index():
"""
Return the license index, building it from the rule files on first use and
returning the in-memory cached index on later calls.
"""
global _LICENSES_INDEX
if not _LICENSES_INDEX:
_LICENSES_INDEX = get_license_index()
return _LICENSES_INDEX
def get_license_index(rules=None):
"""
Return a LicenseIndex built from a list of rules.
"""
if not rules:
rules = get_all_rules()
if DEBUG_PERF:
from pympler import asizeof # @UnresolvedImport
print('Memory size of rules:', asizeof.asizeof(rules))
idx = LicenseIndex(rules)
if DEBUG_PERF:
print('Memory size of index:', asizeof.asizeof(idx))
return idx
class LicenseIndex(object):
"""
A license detection Index.
This holds an Index and loaded Rules.
"""
def __init__(self, rules):
"""
Init the Index with an iterable of Rule objects.
"""
self.license_index = index.Index()
if DEBUG_PERF:
start = time.time()
print('LicenseIndex: Starting building index.')
# index rules text and keep a mapping of rules rid --> rule object
self.rules_by_id = {}
# note: we use numeric ids
for rid, rule in enumerate(rules):
# FIXME: we should pass these lengths and counts downstream
tokens, _min_len, _max_len, _gaps_count = rule.get_tokens()
self.license_index.index_one_from_tokens(rid, tokens)
self.rules_by_id[rid] = rule
if DEBUG_PERF:
duration = time.time() - start
len_rules_by_id = len(self.rules_by_id)
print('Finished building index with %(len_rules_by_id)d rules '
'in %(duration)f seconds.' % locals())
def match(self, location, minimum_score=100):
"""
Match the file at location against the index and return a sequence of
LicenseMatch.
If minimum_score is less than 100, also include approximate matches.
"""
if DEBUG:
print('LicenseIndex.match: location=%(location)r, minimum_score=%(minimum_score)r' % locals())
qdoc = analysis.text_lines(location)
if DEBUG:
qdoc = list(qdoc)
print(' LicenseIndex.match: Query doc has %d lines.' % len(qdoc))
qdoc = iter(qdoc)
exact_matches = self.license_index.match(qdoc, minimum_score=minimum_score)
if DEBUG:
len_exact_matches = len(exact_matches)
print(' LicenseIndex.match: exact_matches#: %(len_exact_matches)r' % locals())
exact_license_matches = []
for rule_id, matched_pos in exact_matches.items():
rule = self.rules_by_id[rule_id]
for match in matched_pos:
index_position, query_position = match
lmatch = LicenseMatch(rule, query_position, index_position, score=100.00)
exact_license_matches.append(lmatch)
if DEBUG:
print(' LicenseIndex.match: unfiltered exact_license_matches: %(exact_license_matches)r' % locals())
if DEBUG_FILTER:
print(' in EXACT: LicenseIndex.match: filtered with filter_overlapping_matches')
filtered_exact = filter_overlapping_matches(exact_license_matches, discard_negative=True)
return sorted(filtered_exact, key=lambda x: x.span)
def increment_line_numbers(token):
"""
Return the token with start and end line numbers incremented by one.
Internally we start at zero, externally we start at one.
"""
if token:
token.start_line += 1
token.end_line += 1
return token
class LicenseMatch(object):
"""
A license detection match object holds:
- a rule: matched Rule object
- the span of the matched region: start and end positions of the analyzed
text where the rule was matched.
- index_position and query_position: the detailed position Tokens of the
match in the indexed rule text and in the query text
- score: a float normalized between 0 and 100. Higher means better.
Exact match score is always 100.
"""
def __init__(self, rule, query_position, index_position=None, score=0):
self.rule = rule
# matched positions, for reference (such as displaying matches)
self.index_position = increment_line_numbers(index_position)
self.query_position = increment_line_numbers(query_position)
# matched query position span (absolute token positions, zero-based)
self.span = Span(query_position.start, query_position.end)
self.score = score
def __repr__(self):
return ('LicenseMatch(\n %(rule)r,\n span=%(span)r, score=%(score)r,'
'\n ipos=%(index_position)r,'
'\n qpos=%(query_position)r\n)' % self.__dict__)
def __len__(self):
return len(self.span)
def is_same(self, othermatch):
"""
Return True if othermatch has the same span, detected license keys and
score.
"""
return (self.has_same_license(othermatch) and self.span == othermatch.span
and self.score == othermatch.score)
def is_more_relevant_than(self, othermatch):
"""
Return True if self is more relevant than othermatch.
"""
return self.span == othermatch.span and self.score >= othermatch.score
def has_same_license(self, othermatch):
"""
Return True if othermatch has the same detected license keys.
"""
return (set(self.rule.licenses) == set(othermatch.rule.licenses))
def is_real_license_match(self):
"""
Return True if this match points to a real license (and not a negative
rule with no license key.)
"""
return self.rule.licenses
def filter_overlapping_matches(matches, discard_negative=True):
"""
Return filtered `matches`, removing duplicated or superfluous matches based
on match position containment and detected licenses overlap. Matches that
are entirely contained in another bigger match are removed. When more than
one matched position matches the same license(s), only one match of this set
is kept.
If discard_negative is True, negative matches (i.e. matches to non-license
rules with no license key) are also filtered out; otherwise they are kept.
"""
if DEBUG_FILTER:
print()
ignored_matches = set()
all_pairs = combinations(matches, 2)
for current_match, match in all_pairs:
if DEBUG_FILTER:
print('\ncurrent_match: %(current_match)r' % locals())
if current_match in ignored_matches:
if DEBUG_FILTER:
print(' current in ignored %(current_match)r' % locals())
continue
if DEBUG_FILTER:
print(' match: %(match)r' % locals())
if match in ignored_matches:
if DEBUG_FILTER:
print(' Passing already ignored: %(match)r' % locals())
continue
if current_match is match:
if DEBUG_FILTER:
print(' Passing self: %(match)r' % locals())
continue
# skip duplicates: keep only one of matches to same span and licenses
if current_match.is_same(match):
ignored_matches.add(current_match)
if DEBUG_FILTER:
print(' skipping duplicate: %(current_match)r' % locals())
continue
# filter smaller matches contained in larger matches
if current_match.span in match.span:
ignored_matches.add(current_match)
if DEBUG_FILTER:
print(' skipping contained: %(current_match)r \n is in%(match)r' % locals())
continue
elif match.span in current_match.span:
ignored_matches.add(match)
if DEBUG_FILTER:
print(' skipping contained: %(match)r \n is in %(current_match)r' % locals())
continue
# filter matches with same span, but different licenses
# keep the most specific license (e.g. the one with the fewest gaps)
if match.span == current_match.span:
if current_match.score == 100:
is_less_specific = current_match.rule.min_len >= match.rule.min_len
if is_less_specific:
if DEBUG_FILTER:
print(' skipping less specific: %(current_match)r' % locals())
ignored_matches.add(current_match)
continue
else:
if DEBUG_FILTER:
print(' skipping less specific: %(match)r' % locals())
ignored_matches.add(match)
continue
else:
if current_match.is_more_relevant_than(match):
if DEBUG_FILTER:
print(' skipping more relevant: %(current_match)r' % locals())
ignored_matches.add(current_match)
continue
else:
if DEBUG_FILTER:
print(' skipping more relevant: %(match)r' % locals())
ignored_matches.add(match)
continue
# filter negative matches to empty or not-a-license
if discard_negative and not current_match.is_real_license_match():
ignored_matches.add(current_match)
if DEBUG_FILTER:
print(' skipping negative: %(current_match)r' % locals())
continue
# FIXME: handle touching and overlapping matches
# if (current_match.span.overlaps(match.span) or current_match.span.touches(match.span)):
# current_licenses = current_match.rule.licenses
# match_licenses = match.rule.licenses
# for lic in current_licenses:
# if (lic in match_licenses or any(l.startswith(lic) for l in match_licenses)):
# ignored_matches.add(current_match)
# break
# continue
return [match for match in matches if match not in ignored_matches]
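# Illustrative sketch of the containment rule applied above: a match whose span
# is entirely inside another match's span gets discarded. The spans here are
# standalone examples, not real LicenseMatch objects.
def _example_span_containment():
    inner = Span(5, 10)
    outer = Span(0, 20)
    return inner in outer  # True: a match spanning `inner` would be filtered out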
|
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write(self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
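# Illustrative sketch (not part of Django): a minimal command subclass following
# the flow described in the BaseCommand docstring above. The command behaviour
# is hypothetical; a real command would live in an app's management/commands/
# package under the name used to invoke it.
class _ExampleGreetCommand(BaseCommand):
    help = 'Print a greeting for every name given on the command line.'
    args = '<name name ...>'
    requires_model_validation = False

    def handle(self, *args, **options):
        if not args:
            raise CommandError('Enter at least one name.')
        # The returned string is written to stdout by execute().
        return '\n'.join('Hello, %s!' % name for name in args)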
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
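# Illustrative sketch (not part of Django): an AppCommand subclass; handle_app()
# is called once for each installed application named on the command line.
# Assumes the models.get_models() helper available in this generation of Django.
class _ExampleListModelsCommand(AppCommand):
    help = 'Print the model names defined in each given app.'

    def handle_app(self, app, **options):
        from django.db import models
        names = [m._meta.object_name for m in models.get_models(app)]
        return '%s: %s' % (app.__name__, ', '.join(names))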
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
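# Illustrative sketch (not part of Django): a LabelCommand subclass that acts on
# each label independently, as described above. The file-touching behaviour is
# hypothetical.
class _ExampleTouchCommand(LabelCommand):
    help = 'Create an empty file for every filename given.'
    label = 'filename'

    def handle_label(self, label, **options):
        open(label, 'a').close()
        return 'Created %s' % label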
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
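# Illustrative sketch (not part of Django): a NoArgsCommand subclass; handle()
# above rejects positional arguments, so only options reach handle_noargs().
class _ExamplePingCommand(NoArgsCommand):
    help = 'Print a fixed reply, accepting no arguments.'

    def handle_noargs(self, **options):
        return 'pong'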
def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except OSError, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
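# Illustrative sketch (not part of Django): invoking copy_helper with the
# parameter order documented above. The names and target directory are
# placeholders; the directory must exist and must not already contain an
# 'exampleapp' subdirectory.
def _example_copy_app_layout(target_dir='/tmp'):
    copy_helper(color_style(), 'app', 'exampleapp', target_dir,
                other_name='exampleproject')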
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import copy
from collections import defaultdict
from paddle.fluid.framework import Variable
from .process_mesh import ProcessMesh
_g_tensor_dist_attr_field_keys = [
"process_mesh", "dims_mapping", "shard_sizes", "device_placement"
]
_g_op_dist_attr_field_keys = [
"process_mesh", "impl_type", "impl_idx", "is_recompute"
]
_g_op_input_suffix = "@input"
_g_op_output_suffix = "@output"
def get_tensor_dist_attr_field_keys():
global _g_tensor_dist_attr_field_keys
return _g_tensor_dist_attr_field_keys
def get_op_dist_attr_field_keys():
global _g_op_dist_attr_field_keys
return _g_op_dist_attr_field_keys
def append_op_input_suffix(name):
global _g_op_input_suffix
return name + _g_op_input_suffix
def append_op_output_suffix(name):
global _g_op_output_suffix
return name + _g_op_output_suffix
class TensorDistributedAttribute:
def __init__(self):
# The process mesh of a distributed operator attribute must be the same as
# the process meshes of all its input and output distributed attributes
self._process_mesh = None
self._dims_mapping = None
self._shard_sizes = None
self._device_placement = None
self._is_annotated = {}
@property
def process_mesh(self):
return self._process_mesh
@process_mesh.setter
def process_mesh(self, process_mesh):
if process_mesh is not None:
assert isinstance(process_mesh, (list, ProcessMesh)), \
"The type of process_mesh must be list or ProcessMesh."
if isinstance(process_mesh, list):
process_mesh = ProcessMesh(process_mesh)
self._process_mesh = copy.deepcopy(process_mesh)
@property
def dims_mapping(self):
return self._dims_mapping
@dims_mapping.setter
def dims_mapping(self, dims_mapping):
if dims_mapping is not None:
assert isinstance(dims_mapping, list), \
"The type of dims_mapping must be list."
assert all(isinstance(x, int) for x in dims_mapping), \
("All elements of dims_mapping must be integer")
assert all(x >= -1 for x in dims_mapping), \
("All elements of dims_mapping must be greater than or equal to -1.")
self._dims_mapping = copy.deepcopy(dims_mapping)
@property
def shard_sizes(self):
return self._shard_sizes
@shard_sizes.setter
def shard_sizes(self, shard_sizes):
if shard_sizes is not None:
self._shard_sizes = copy.deepcopy(shard_sizes)
@property
def device_placement(self):
return self._device_placement
@device_placement.setter
def device_placement(self, device_placement):
if device_placement is not None:
self._device_placement = copy.deepcopy(device_placement)
def init(self, dist_attr):
if dist_attr is None:
return
assert isinstance(dist_attr, (dict, TensorDistributedAttribute)), \
"The type of dist_attr must be dict or TensorDistributedAttribute."
if isinstance(dist_attr, dict):
for key, value in dist_attr.items():
if key in get_tensor_dist_attr_field_keys():
field_property = TensorDistributedAttribute.__dict__.get(
key, None)
if field_property:
field_property.fset(self, value)
else:
assert False, "No setter for {} in args {}.".format(
key, dist_attr)
elif isinstance(dist_attr, TensorDistributedAttribute):
for key in get_tensor_dist_attr_field_keys():
field_property = TensorDistributedAttribute.__dict__.get(key,
None)
if field_property:
field_property.fset(self, field_property.fget(dist_attr))
else:
assert False, "No setter for {} in args {}.".format(
key, dist_attr)
self._is_annotated = copy.deepcopy(dist_attr._is_annotated)
def is_annotated(self, dist_attr_field_name):
return self._is_annotated.get(dist_attr_field_name, False)
def mark_annotated(self, dist_attr_field_name):
self._is_annotated[dist_attr_field_name] = True
def mark_annotated_as(self, dist_attr):
if dist_attr is None:
return
assert isinstance(dist_attr, (dict, TensorDistributedAttribute)), \
"The type of dist_attr must be dict or TensorDistributedAttribute."
if isinstance(dist_attr, dict):
for key in dist_attr.keys():
if key in get_tensor_dist_attr_field_keys():
self.mark_annotated(key)
elif isinstance(dist_attr, TensorDistributedAttribute):
self._is_annotated = copy.deepcopy(dist_attr._is_annotated)
def clear_annotated(self):
self._is_annotated.clear()
def __str__(self):
str = "\n\ttensor_dist_attr = {"
if self.is_annotated("process_mesh"):
annotated_str = "annotated"
else:
annotated_str = "non-annotated"
str += "\n\t\tprocess_mesh ({}): {},".format(annotated_str,
self.process_mesh)
if self.is_annotated("dims_mapping"):
annotated_str = "annotated"
else:
annotated_str = "non-annotated"
str += "\n\t\tdims_mapping ({}): {}".format(annotated_str,
self.dims_mapping)
str += "\n\t}"
return str
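# Illustrative sketch (not part of the original module): building a tensor
# distributed attribute with the setters defined above. The nested process list
# is an assumption about what ProcessMesh accepts; -1 in dims_mapping marks a
# tensor dimension that is not sharded.
def _example_tensor_dist_attr():
    attr = TensorDistributedAttribute()
    attr.process_mesh = [[0, 1], [2, 3]]  # converted to a ProcessMesh internally
    attr.dims_mapping = [0, -1]  # map tensor dim 0 to mesh dim 0, replicate dim 1
    attr.mark_annotated("dims_mapping")
    return attr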
class OperatorDistributedAttribute:
def __init__(self):
self._process_mesh = None
self._impl_type = None
self._impl_idx = None
self._inputs_dist_attrs = {}
self._outputs_dist_attrs = {}
self._is_annotated = {}
self._is_recompute = False
@property
def process_mesh(self):
return self._process_mesh
@process_mesh.setter
def process_mesh(self, process_mesh):
if process_mesh is not None:
assert isinstance(process_mesh, (list, ProcessMesh)), \
"The type of process_mesh must be list or ProcessMesh."
if isinstance(process_mesh, list):
process_mesh = ProcessMesh(process_mesh)
self._process_mesh = copy.deepcopy(process_mesh)
for dist_attr in self._inputs_dist_attrs.values():
dist_attr.process_mesh = process_mesh
for dist_attr in self._outputs_dist_attrs.values():
dist_attr.process_mesh = process_mesh
@property
def impl_type(self):
return self._impl_type
@impl_type.setter
def impl_type(self, impl_type):
if impl_type is not None:
self._impl_type = impl_type
@property
def impl_idx(self):
return self._impl_idx
@impl_idx.setter
def impl_idx(self, impl_idx):
if impl_idx is not None:
self._impl_idx = impl_idx
@property
def is_recompute(self):
return self._is_recompute
@is_recompute.setter
def is_recompute(self, is_recompute):
assert isinstance(is_recompute, bool)
self._is_recompute = is_recompute
@property
def inputs_dist_attrs(self):
return self._inputs_dist_attrs
@property
def outputs_dist_attrs(self):
return self._outputs_dist_attrs
def get_input_dist_attr(self, name):
return self._inputs_dist_attrs.get(name, None)
def set_input_dist_attr(self, name, dist_attr):
dist_attr_object = TensorDistributedAttribute()
dist_attr_object.init(dist_attr)
self._inputs_dist_attrs[name] = dist_attr_object
def get_output_dist_attr(self, name):
return self._outputs_dist_attrs.get(name, None)
def set_output_dist_attr(self, name, dist_attr):
dist_attr_object = TensorDistributedAttribute()
dist_attr_object.init(dist_attr)
self._outputs_dist_attrs[name] = dist_attr_object
def get_input_dims_mapping(self, name):
input_dist_attr = self.get_input_dist_attr(name)
if input_dist_attr:
dims_mapping = input_dist_attr.dims_mapping
else:
dims_mapping = None
return dims_mapping
def set_input_dims_mapping(self, name, dims_mapping):
input_dist_attr = self.get_input_dist_attr(name)
if input_dist_attr:
input_dist_attr.dims_mapping = dims_mapping
else:
dist_attr = TensorDistributedAttribute()
dist_attr.dims_mapping = dims_mapping
self._inputs_dist_attrs[name] = dist_attr
def get_output_dims_mapping(self, name):
output_dist_attr = self.get_output_dist_attr(name)
if output_dist_attr:
dims_mapping = output_dist_attr.dims_mapping
else:
dims_mapping = None
return dims_mapping
def set_output_dims_mapping(self, name, dims_mapping):
output_dist_attr = self.get_output_dist_attr(name)
if output_dist_attr:
output_dist_attr.dims_mapping = dims_mapping
else:
dist_attr = TensorDistributedAttribute()
dist_attr.dims_mapping = dims_mapping
self._outputs_dist_attrs[name] = dist_attr
def init(self, dist_attr):
if dist_attr is None:
return
assert isinstance(dist_attr, (dict, OperatorDistributedAttribute)), \
"The type of dist_attr must be dict or OperatorDistributedAttribute."
if isinstance(dist_attr, dict):
for key, value in dist_attr.items():
if isinstance(key, Variable):
tensor_dist_attr = TensorDistributedAttribute()
tensor_dist_attr.init(value)
if dist_attr.get(append_op_input_suffix(key.name), False):
self.set_input_dist_attr(key.name, tensor_dist_attr)
if dist_attr.get(append_op_output_suffix(key.name), False):
self.set_output_dist_attr(key.name, tensor_dist_attr)
else:
if key in get_op_dist_attr_field_keys():
field_property = OperatorDistributedAttribute.__dict__.get(
key, None)
if field_property:
field_property.fset(self, value)
else:
assert False, "No setter for {} in args {}.".format(
key, dist_attr)
elif isinstance(dist_attr, OperatorDistributedAttribute):
for tensor_name, tensor_dist_attr in dist_attr.inputs_dist_attrs.items(
):
self.set_input_dist_attr(
tensor_name, dist_attr.get_input_dist_attr(tensor_name))
for tensor_name, tensor_dist_attr in dist_attr.outputs_dist_attrs.items(
):
self.set_output_dist_attr(
tensor_name, dist_attr.get_output_dist_attr(tensor_name))
self._is_annotated = copy.deepcopy(dist_attr._is_annotated)
for key in get_op_dist_attr_field_keys():
field_property = OperatorDistributedAttribute.__dict__.get(key,
None)
if field_property:
field_property.fset(self, field_property.fget(dist_attr))
else:
assert False, "No setter for {} in args {}.".format(
key, dist_attr)
# Make sure all process meshes in the dist op are the same
process_meshes = []
process_meshes.append(self.process_mesh)
for tensor_dist_attr in self.inputs_dist_attrs.values():
process_meshes.append(tensor_dist_attr.process_mesh)
for tensor_dist_attr in self.outputs_dist_attrs.values():
process_meshes.append(tensor_dist_attr.process_mesh)
shared_process_mesh = None
for process_mesh in process_meshes:
if process_mesh is not None:
if shared_process_mesh is None:
shared_process_mesh = process_mesh
else:
assert process_mesh == shared_process_mesh, \
"ProcessMeshes in DistributedOperator must be the same."
self.process_mesh = shared_process_mesh
def is_annotated(self, attr_name):
return self._is_annotated.get(attr_name, False)
def mark_annotated(self, attr_name):
if attr_name == "process_mesh":
# Make sure process_mesh is annotated consistently
self._is_annotated[attr_name] = True
for tensor_dist_attr in self.inputs_dist_attrs.values():
tensor_dist_attr.mark_annotated(attr_name)
for tensor_dist_attr in self.outputs_dist_attrs.values():
tensor_dist_attr.mark_annotated(attr_name)
else:
self._is_annotated[attr_name] = True
def mark_annotated_as(self, dist_attr):
if dist_attr is None:
return
assert isinstance(dist_attr, (dict, OperatorDistributedAttribute)), \
"The type of dist_attr must be dict or OperatorDistributedAttribute."
if isinstance(dist_attr, dict):
for key, value in dist_attr.items():
if isinstance(key, Variable):
input_dist_attr = self.get_input_dist_attr(key.name)
if input_dist_attr is not None:
input_dist_attr.mark_annotated_as(value)
output_dist_attr = self.get_output_dist_attr(key.name)
if output_dist_attr is not None:
output_dist_attr.mark_annotated_as(value)
else:
if key in get_op_dist_attr_field_keys():
self.mark_annotated(key)
process_mesh_annotated = False
if self.is_annotated("process_mesh"):
process_mesh_annotated = True
for tensor_dist_attr in self.inputs_dist_attrs.values():
if tensor_dist_attr.is_annotated("process_mesh"):
process_mesh_annotated = True
for tensor_dist_attr in self.outputs_dist_attrs.values():
if tensor_dist_attr.is_annotated("process_mesh"):
process_mesh_annotated = True
if process_mesh_annotated:
self.mark_annotated("process_mesh")
elif isinstance(dist_attr, OperatorDistributedAttribute):
process_mesh_annotated = False
self._is_annotated = copy.deepcopy(dist_attr._is_annotated)
if self.is_annotated("process_mesh"):
process_mesh_annotated = True
for tensor_name, tensor_dist_attr in dist_attr.inputs_dist_attrs.items(
):
input_dist_attr = self.get_input_dist_attr(tensor_name)
if input_dist_attr is not None:
input_dist_attr.mark_annotated_as(tensor_dist_attr)
if input_dist_attr.is_annotated("process_mesh"):
process_mesh_annotated = True
for tensor_name, tensor_dist_attr in dist_attr.outputs_dist_attrs.items(
):
output_dist_attr = self.get_output_dist_attr(tensor_name)
if output_dist_attr is not None:
output_dist_attr.mark_annotated_as(tensor_dist_attr)
if output_dist_attr.is_annotated("process_mesh"):
process_mesh_annotated = True
if process_mesh_annotated:
self.mark_annotated("process_mesh")
def clear_annotated(self):
self._is_annotated.clear()
for tensor_dist_attr in self.inputs_dist_attrs.values():
tensor_dist_attr.clear_annotated()
for tensor_dist_attr in self.outputs_dist_attrs.values():
tensor_dist_attr.clear_annotated()
def is_annotated_input_dims_mapping(self, name):
input_dist_attr = self.get_input_dist_attr(name)
if input_dist_attr:
return input_dist_attr.is_annotated("dims_mapping")
else:
return False
def is_annotated_output_dims_mapping(self, name):
output_dist_attr = self.get_output_dist_attr(name)
if output_dist_attr:
return output_dist_attr.is_annotated("dims_mapping")
else:
return False
def __str__(self):
str = "\n\top_dist_attr = {"
if self.is_annotated("process_mesh"):
annotated_str = "annotated"
else:
annotated_str = "non-annotated"
str += "\n\t\tprocess_mesh ({}): {},".format(annotated_str,
self.process_mesh)
for arg_name, tensor_dist_attr in self.inputs_dist_attrs.items():
str += "\n\t\t{}'s: {},".format(arg_name, tensor_dist_attr)
for arg_name, tensor_dist_attr in self.outputs_dist_attrs.items():
str += "\n\t\t{}'s: {},".format(arg_name, tensor_dist_attr)
str += "\n\t\timpl type: {}, ".format(self._impl_type)
str += "impl idx: {}".format(self._impl_idx)
str += "\n\t}"
return str
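# Illustrative sketch (not part of the original module): configuring an operator
# attribute with the setters defined above. The argument names "X" and "Out" are
# placeholders for real operator input/output names.
def _example_op_dist_attr():
    op_attr = OperatorDistributedAttribute()
    op_attr.set_input_dims_mapping("X", [0, -1])
    op_attr.set_output_dims_mapping("Out", [0, -1])
    # Setting the process mesh also propagates it to all input/output attrs.
    op_attr.process_mesh = [0, 1, 2, 3]
    op_attr.mark_annotated("process_mesh")
    return op_attr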
|
|
from datetime import timedelta, datetime
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.http import Http404
from django.contrib import auth
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from django.contrib.auth.views import password_reset_confirm as django_password_reset_confirm
from django.utils.http import urlsafe_base64_decode, is_safe_url
from froide.foirequestfollower.models import FoiRequestFollower
from froide.foirequest.models import FoiRequest, FoiEvent
from froide.helper.auth import login_user
from froide.helper.utils import render_403
from .forms import (UserLoginForm, PasswordResetForm, NewUserForm,
UserEmailConfirmationForm, UserChangeAddressForm, UserDeleteForm,
UserChangeEmailForm, TermsForm)
from .models import AccountManager
def confirm(request, user_id, secret, request_id=None):
if request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are logged in and cannot use a confirmation link.'))
return redirect('account-show')
user = get_object_or_404(auth.get_user_model(), pk=int(user_id))
if user.is_active:
return redirect('account-login')
account_manager = AccountManager(user)
if account_manager.confirm_account(secret, request_id):
messages.add_message(request, messages.WARNING,
_('Your email address is now confirmed and you are logged in. You should change your password now by filling out the form below.'))
login_user(request, user)
if request_id is not None:
foirequest = FoiRequest.confirmed_request(user, request_id)
if foirequest:
messages.add_message(request, messages.SUCCESS,
_('Your request "%s" has now been sent') % foirequest.title)
next = request.GET.get('next', request.session.get('next'))
if next:
if 'next' in request.session:
del request.session['next']
return redirect(next)
return redirect(reverse('account-settings') + "?new#change-password-now")
else:
messages.add_message(request, messages.ERROR,
_('You can only use the confirmation link once, please login with your password.'))
return redirect('account-login')
def go(request, user_id, secret, url):
if request.user.is_authenticated():
if request.user.id != int(user_id):
messages.add_message(request, messages.INFO,
_('You are logged in with a different user account. Please logout first before using this link.'))
else:
user = get_object_or_404(auth.get_user_model(), pk=int(user_id))
if not user.is_active:
messages.add_message(request, messages.ERROR,
_('Your account is not active.'))
raise Http404
account_manager = AccountManager(user)
if account_manager.check_autologin_secret(secret):
login_user(request, user)
return redirect(url)
def show(request, context=None, status=200):
if not request.user.is_authenticated():
return redirect('account-login')
my_requests = FoiRequest.objects.filter(user=request.user).order_by("-last_message")
if not context:
context = {}
if 'new' in request.GET:
request.user.is_new = True
own_foirequests = FoiRequest.objects.get_dashboard_requests(request.user)
followed_requests = FoiRequestFollower.objects.filter(user=request.user)\
.select_related('request')
followed_foirequest_ids = list(map(lambda x: x.request_id, followed_requests))
following = False
events = []
if followed_foirequest_ids:
following = len(followed_foirequest_ids)
since = datetime.utcnow() - timedelta(days=14)
events = FoiEvent.objects.filter(public=True,
request__in=followed_foirequest_ids,
timestamp__gte=since).order_by(
'request', 'timestamp')
context.update({
'own_requests': own_foirequests,
'followed_requests': followed_requests,
'followed_events': events,
'following': following,
'foirequests': my_requests
})
return render(request, 'account/show.html', context, status=status)
def profile(request, slug):
user = get_object_or_404(auth.get_user_model(), username=slug)
if user.private:
raise Http404
foirequests = FoiRequest.published.filter(user=user).order_by('-first_message')
foievents = FoiEvent.objects.filter(public=True, user=user)[:20]
return render(request, 'account/profile.html', {
'profile_user': user,
'requests': foirequests,
'events': foievents
})
@require_POST
def logout(request):
auth.logout(request)
messages.add_message(request, messages.INFO,
_('You have been logged out.'))
return redirect("/")
def login(request, base="base.html", context=None,
template='account/login.html', status=200):
simple = False
initial = None
if not context:
context = {}
if "reset_form" not in context:
context['reset_form'] = PasswordResetForm()
if "signup_form" not in context:
context['signup_form'] = NewUserForm()
if request.GET.get("simple") is not None:
base = "simple_base.html"
simple = True
if request.GET.get('email'):
initial = {'email': request.GET.get('email')}
else:
if request.user.is_authenticated():
return redirect('account-show')
if request.method == "POST" and status == 200:
status = 400 # if ok, we are going to redirect anyways
next = request.POST.get('next')
form = UserLoginForm(request.POST)
if form.is_valid():
user = auth.authenticate(username=form.cleaned_data['email'],
password=form.cleaned_data['password'])
if user is not None:
if user.is_active:
auth.login(request, user)
messages.add_message(request, messages.INFO,
_('You are now logged in.'))
if simple:
return redirect(reverse('account-login') + "?simple")
else:
if next:
return redirect(next)
return redirect('account-show')
else:
messages.add_message(request, messages.ERROR,
_('Please activate your mail address before logging in.'))
else:
messages.add_message(request, messages.ERROR,
_('E-mail and password do not match.'))
else:
form = UserLoginForm(initial=initial)
context.update({
"form": form,
"custom_base": base,
"simple": simple,
'next': request.GET.get('next')
})
return render(request, template, context, status=status)
@require_POST
def signup(request):
next = request.POST.get('next')
next_url = next if next else '/'
if request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are currently logged in, you cannot signup.'))
return redirect(next_url)
form = UserLoginForm()
signup_form = NewUserForm(request.POST)
next = request.POST.get('next')
if signup_form.is_valid():
user, password = AccountManager.create_user(**signup_form.cleaned_data)
AccountManager(user).send_confirmation_mail(password=password)
messages.add_message(request, messages.SUCCESS,
_('Please check your emails for a mail from us with a confirmation link.'))
if next:
request.session['next'] = next
return redirect(next_url)
return render(request, 'account/login.html', {
"form": form,
"signup_form": signup_form,
"custom_base": "base.html",
"simple": False
}, status=400)
@require_POST
def change_password(request):
if not request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are not currently logged in, you cannot change your password.'))
return render_403(request)
form = request.user.get_password_change_form(request.POST)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS,
_('Your password has been changed.'))
return redirect('account-show')
return show(request, context={"password_change_form": form}, status=400)
@require_POST
def send_reset_password_link(request):
next = request.POST.get('next')
next_url = next if next else '/'
if request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are currently logged in, you cannot get a password reset link.'))
return redirect(next_url)
form = auth.forms.PasswordResetForm(request.POST)
if form.is_valid():
if next:
request.session['next'] = next
form.save(use_https=True, email_template_name="account/password_reset_email.txt")
messages.add_message(request, messages.SUCCESS,
_("Check your mail, we sent you a password reset link."
" If you don't receive an email, check if you entered your"
" email correctly or if you really have an account "))
return redirect(next_url)
return login(request, context={"reset_form": form}, status=400)
def password_reset_confirm(request, uidb64=None, token=None):
# TODO: Fix this code
# - don't sniff response
# - make redirect
response = django_password_reset_confirm(request, uidb64=uidb64, token=token,
template_name='account/password_reset_confirm.html',
post_reset_redirect=reverse('account-show'))
if response.status_code == 302:
uid = urlsafe_base64_decode(uidb64)
user = auth.get_user_model().objects.get(pk=uid)
login_user(request, user)
messages.add_message(request, messages.SUCCESS,
_('Your password has been set and you are now logged in.'))
if 'next' in request.session and is_safe_url(
url=request.session['next'],
host=request.get_host()):
response['Location'] = request.session['next']
del request.session['next']
return response
@require_POST
def change_address(request):
if not request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are not currently logged in, you cannot change your address.'))
return render_403(request)
form = UserChangeAddressForm(request.user, request.POST)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS,
_('Your address has been changed.'))
return redirect('account-show')
return show(request, context={"address_change_form": form}, status=400)
def csrf_failure(request, reason=''):
return render_403(request, message=_("You probably do not have cookies enabled, but you need cookies to use this site! Cookies are only ever sent securely. The technical reason is: %(reason)s") % {"reason": reason})
def account_settings(request, context=None, status=200):
if not request.user.is_authenticated():
return redirect('account-login')
if not context:
context = {}
if 'new' in request.GET:
request.user.is_new = True
if 'user_delete_form' not in context:
context['user_delete_form'] = UserDeleteForm(request.user)
if 'change_email_form' not in context:
context['change_email_form'] = UserChangeEmailForm()
return render(request, 'account/settings.html', context, status=status)
def change_email(request):
if not request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are not currently logged in, you cannot change your email address.'))
return render_403(request)
if request.POST:
form = UserChangeEmailForm(request.POST)
if not form.is_valid():
messages.add_message(request, messages.ERROR,
_('Your email address could not be changed.'))
return account_settings(
request,
context={
'change_email_form': form
},
status=400
)
AccountManager(request.user).send_email_change_mail(
form.cleaned_data['email']
)
messages.add_message(request, messages.SUCCESS,
_('We sent a confirmation email to your new address. Please click the link in there.'))
return redirect('account-settings')
form = UserEmailConfirmationForm(request.user, request.GET)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS,
_('Your email address has been changed.'))
else:
messages.add_message(request, messages.ERROR,
_('The email confirmation link was invalid or expired.'))
return redirect('account-settings')
@require_POST
def delete_account(request):
if not request.user.is_authenticated():
messages.add_message(request, messages.ERROR,
_('You are not currently logged in, you cannot delete your account.'))
return render_403(request)
form = UserDeleteForm(request.user, request.POST)
if not form.is_valid():
messages.add_message(request, messages.ERROR,
_('Password or confirmation phrase were wrong. Account was not deleted.'))
return account_settings(
request,
context={
'user_delete_form': form
},
status=400
)
# Removing all personal data from account
user = request.user
user.organization = ''
user.organization_url = ''
user.private = True
user.address = ''
user.save()
user.first_name = ''
user.last_name = ''
user.is_active = False
user.email = ''
user.username = 'u%s' % user.pk
user.save()
auth.logout(request)
messages.add_message(request, messages.INFO,
_('Your account has been deleted and you have been logged out.'))
return redirect('/')
def new_terms(request, next=None):
if next is None:
next = request.GET.get('next', '/')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
if not request.user.is_authenticated():
return redirect(next)
if request.user.terms:
return redirect(next)
form = TermsForm()
if request.POST:
form = TermsForm(request.POST)
if form.is_valid():
form.save(request.user)
messages.add_message(request, messages.SUCCESS,
_('Thank you for accepting our new terms!'))
return redirect(next)
else:
messages.add_message(request, messages.ERROR,
_('You need to accept our new terms to continue.'))
return render(request, 'account/new_terms.html', {
'terms_form': form,
'next': next
})
|
|
#!/usr/bin/env python
from spider import *
sys.path.append("..")
from utils import Utils
class ProjectPaperSpider(Spider):
def __init__(self):
Spider.__init__(self)
self.utils = Utils()
self.school = "project-papers"
self.dirs = "eecs/papers/project-papers/"
def doWork(self):
'''
self.getSTAIRPapers()
self.getSTARTPapers()
self.getSparkPapers()
self.getHadoopPapers()
#self.getRoboBrainPapers()
self.getMobileyePapers()
self.getAutonomousDrivingPapers()
self.getCALOPapers()
'''
#self.getWastonPapers()
self.getDeepmindPapers()
#self.getFacebookResearchPapers()
#self.getGoogleBrainPapers()
#self.getAI2Papers()
#self.getBaiduResearchPapers()
def getBaiduResearchPapers(self):
urls = ['http://research.baidu.com/silicon-valley-ai-lab/', 'http://research.baidu.com/institute-of-deep-learning', 'http://research.baidu.com/big-data-lab/']
file_name = self.get_file_name(self.dirs + "baidu-research", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
papers = {}
for url in urls:
r = requests.get(url)
soup = BeautifulSoup(r.text)
title = ''
link = ''
authors = 'author:'
summary = 'summary:'
desc = 'description:'
for p in soup.find_all('p'):
title = ''
link = ''
authors = 'author:'
summary = 'summary:'
desc = 'description:'
data = p.text.split('\n')
if len(data) == 3 or (len(data) == 1 and p.em != None):
if data[0] == '':
continue
if len(data) == 3:
title = data[0]
if p.a != None:
link = p.a['href']
authors += data[1] + ' '
desc += data[2] + ' '
#print title
self.count += 1
id = 'baidu-research-' + str(self.count)
if self.count < 10:
id = 'baidu-research-0' + str(self.count)
papers[id] = [title, link, authors, desc]
title = ''
link = ''
authors = 'author:'
summary = 'summary:'
desc = 'description:'
elif len(data) == 1:
summary += data[0] + ' '
id = 'baidu-research-' + str(self.count)
if self.count < 10:
id = 'baidu-research-0' + str(self.count)
papers[id].append(summary)
for k, v in sorted(papers.items(), key=lambda papers:papers[0][papers[0].rfind('-') + 1 :]):
print v
if len(v) == 5:
self.write_db(f, k, v[0], v[1], v[2] + v[4] + v[3])
else:
self.write_db(f, k, v[0], v[1], v[2] + v[3])
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getAI2Papers(self):
r = requests.get('http://allenai.org/papers.html')
soup = BeautifulSoup(r.text)
ul = soup.find('ul', class_='filter-data')
soup = BeautifulSoup(ul.prettify())
file_name = self.get_file_name(self.dirs + "ai2", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for li in soup.find_all('li'):
title = li.a.text.strip()
link = li.a['href']
sp = BeautifulSoup(li.prettify())
authors = "author:"
desc = 'description:'
count = 0
for em in sp.find_all('em'):
count += 1
if count == 1:
authors += em.text.replace('\n', '').strip() + ' '
else:
desc += em.text.replace('\n', '').strip() + ' '
print authors
print desc
self.count += 1
self.write_db(f, 'ai2-' + str(self.count), title, link, authors + desc)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getGoogleBrainPapers(self):
r = requests.get('https://research.google.com/pubs/BrainTeam.html')
soup = BeautifulSoup(r.text)
ul = soup.find('ul', class_='pub-list')
soup = BeautifulSoup(ul.prettify())
file_name = self.get_file_name(self.dirs + "googlebrain", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for li in soup.find_all('li'):
sp = BeautifulSoup(li.prettify())
link = sp.find('a', class_='pdf-icon tooltip')
if link != None:
link = 'https://research.google.com' + link['href']
else:
link = ''
title = sp.find('p', class_='pub-title')
count = 0
authors ="author:"
desc = 'description:'
for p in sp.find_all('p'):
count += 1
if count == 1:
title = self.utils.removeDoubleSpace(p.text.strip().replace('\n', ''))
if count == 2:
authors += self.utils.removeDoubleSpace(p.text.replace('\n', '')).strip() + ' '
authors = authors.replace(' ,', ',').strip() + ' '
if count == 3:
desc += self.utils.removeDoubleSpace(p.text.replace('\n', '')).strip() + ' '
print title
print link
print authors
print desc
self.count += 1
self.write_db(f, 'googlebrain-' + str(self.count), title, link, authors + desc)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getFacebookResearchPapers(self):
count = 0
file_name = self.get_file_name(self.dirs + "facebook-ai", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
while True:
count += 1
#https://research.facebook.com/publications/machinelearning,machinelearningarea/
r = self.requestWithProxy2('https://research.facebook.com/publications/ai/?p=' + str(count))
print r.status_code
if r.status_code != 200:
break
if r.text.find('More contents coming soon') != -1:
break
soup = BeautifulSoup(r.text)
divs = soup.find_all('div', class_='_3y3h')
for div in divs:
sp = BeautifulSoup(div.prettify())
title = ''
link = ''
authors = 'author:'
published = 'published:'
summary = 'summary:'
category = 'category:'
desc = 'description:'
div = sp.find('div', class_='_3y3i')
if div == None:
continue
if div.a != None:
title += div.a.text.strip() + ' '
link = 'https://research.facebook.com' + div.a['href']
div = sp.find('div', class_='_3y34')
if div != None:
for li in BeautifulSoup(div.prettify()).find_all('li'):
authors += li.text.strip() + ', '
authors = authors[0 : len(authors) - 2] + ' '
div = sp.find('div', class_='_3y3n')
if div != None:
desc += div.text.strip() + ' '
div = sp.find('div', class_='_3y3l')
if div != None:
published += div.text.strip() + ' '
div = sp.find('div', class_='_1c-z')
if div != None:
summary += div.text.strip() + ' '
div = sp.find('div', class_='_3y3m')
if div != None:
category += self.utils.removeDoubleSpace(div.text.replace('\n', '')).strip() + ' '
print title
self.count += 1
self.write_db(f, 'facebook-ai-' + str(self.count), title, link, authors + published + category + summary + desc)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getDeepmindPapers(self):
file_name = self.get_file_name(self.dirs + "deepmind", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
'''
for ul in soup.find_all('ul', class_='publication'):
#print ul
sp = BeautifulSoup(ul.prettify())
title = sp.find('li', class_='title').text.strip()
link = sp.find('li', class_='title').a['href']
authors = ''
for span in sp.find_all('span', class_='author'):
authors += span.text.strip() + ', '
authors = authors[0 : len(authors) - 2]
desc = self.utils.removeDoubleSpace(sp.find('li', class_='abstract').text.strip().replace('\n', ' '))
print title
print link
print authors
print desc
self.count += 1
self.write_db(f, 'deepmind-' + str(self.count), title, link, 'author:' + authors + ' description:' + desc)
'''
all_authors = {}
for page in range(0 , 100):
url = 'https://deepmind.com/research/publications/?page=' + str(page + 1)
print url
r = self.requestWithProxy(url)
if r.status_code != 200:
break
soup = BeautifulSoup(r.text)
divs = soup.find_all('div', class_='research-list--item-heading')
if len(divs) == 0:
break
for div in divs:
print div.h1.text
desc = div.p.text
data = self.utils.removeDoubleSpace(div.text[div.text.find('Authors:') + 8 :].strip().replace('\n', ' '))
for author in data.split(','):
author = author.strip()
if all_authors.has_key(author) == False:
all_authors[author] = author
print data
self.count += 1
self.write_db(f, 'deepmind-paper-' + str(self.count), div.h1.text, '', 'author:' + data + ' description:' + desc)
print ', '.join(all_authors.keys())
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getCALOPapers(self):
r = requests.get('http://www.ai.sri.com/pubs/search.php?project=179')
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "CALO", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for li in soup.find_all('li'):
title = li.p.strong.text.strip()
link = 'http://www.ai.sri.com' + li.p.a['href']
print title
self.count += 1
self.write_db(f, 'calo-' + str(self.count), title, link)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getAutonomousDrivingPapers(self):
r = requests.get("http://driving.stanford.edu/papers.html")
soup = BeautifulSoup(r.text)
title = ""
author = ""
journal = ""
desc = ""
url = ""
file_name = self.get_file_name(self.dirs + "AutonomousDriving", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for p in soup.find_all("p"):
if p.span != None:
sp = BeautifulSoup(p.prettify())
title = sp.find('span', class_='papertitle').text.strip()
author = "author:" + self.utils.removeDoubleSpace(sp.find('span', class_='authors').text.strip().replace('\n', '')) + " "
journal = "journal:" + sp.find('span', class_='meeting').text.strip()
journal += " " + sp.find('span', class_='year').text.strip() + " "
desc = "description:" + self.utils.removeDoubleSpace(sp.find('span', class_='summary').text.strip().replace('\n', ''))
if p.a != None and p.a['href'].find(".pdf") != -1:
if p.a['href'].startswith('http'):
url = p.a['href']
else:
url = 'http://driving.stanford.edu/' + p.a['href']
self.count += 1
self.write_db(f, "autonomousdriving-paper-" + str(self.count), title, url, author + journal + desc)
title = ""
author = ""
journal = ""
desc = ""
url = ""
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getMobileyePapers(self):
file_name = self.get_file_name(self.dirs + "Mobileye", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for i in range(1, 3):
r = requests.get("http://www.mobileye.com/technology/mobileye-research/page/" + str(i))
soup = BeautifulSoup(r.text)
for div in soup.find_all("div", class_="ContentItemText"):
title = div.h2.text.strip()
link = div.h2.a['href']
author = "author:" + div.p.text.strip()
print title
self.count += 1
self.write_db(f, "mobileye-paper-" + str(self.count), title, link, author)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getHadoopPapers(self):
r = requests.get("http://wiki.apache.org/hadoop/Papers")
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "hadoop", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for li in soup.find_all("li"):
if li.p != None:
print li.p.a.text
self.count += 1
self.write_db(f, "hadoop-paper-" + str(self.count), li.p.a.text, li.p.a["href"])
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getSparkPapers(self):
r = requests.get("http://spark.apache.org/documentation.html")
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "spark", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for li in soup.find_all("li"):
if li.a != None and li.a["href"].find("pdf") != -1 and li.em != None:
title = li.a.text
author = "author:" + li.prettify()[li.prettify().find('</a>') + 7: li.prettify().find('<em>')].strip() + " "
journal = "journal:" + li.em.text
print title
self.count += 1
self.write_db(f, "spark-paper-" + str(self.count), title , li.a["href"], author + journal)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getSTARTPapers(self):
r = requests.get("http://start.mit.edu/publications.php")
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "START", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
link = ""
title = ""
journal = ""
author = ""
for td in soup.find_all("td"):
if td.a != None and td.strong == None and td.a["href"] != "index.php":
print ""
if td.a["href"].find("http") == -1:
link = "http://start.mit.edu/" + td.a["href"]
else:
link = td.a["href"]
print link
else:
if td.strong != None:
if td.em != None:
journal = "journal:" + td.em.text + " "
print journal
if td.strong != None:
title = td.strong.text
print title
else:
if td.em != None:
title = td.em.text
print title
if td.text.find(".") != -1:
author = "author:" + td.text[0 : td.text.find(".")] + " "
print author
print ""
self.count += 1
self.write_db(f, "start-paper-" + str(self.count), title, link, author + journal)
title = ""
link = ""
author = ""
journal = ""
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getSTAIRPapers(self):
r = requests.get("http://stair.stanford.edu/papers.php")
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "STAIRP", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for li in soup.find_all("li"):
title = ""
link = ""
if li.span == None:
continue
title = li.span.text
if li.a != None:
link = li.a['href']
self.count += 1
self.write_db(f, "STAIRP-paper-" + str(self.count), title, link)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getRoboBrainPapers(self):
r = requests.get("http://robobrain.me/#/about")
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "robotbrain", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
print r.text
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getWastonPapers(self):
r = requests.get('http://researcher.watson.ibm.com/researcher/view_group_pubs.php?grp=2099')
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.dirs + "waston", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for div in soup.find_all("div", class_="publication"):
link = ""
authors = ""
journal = ""
title = div.h4.text.strip()
if div.h4.a != None:
link = div.h4.a['href']
sp = BeautifulSoup(div.prettify())
count = 0
for span in sp.find_all("span", class_="pubspan"):
count += 1
data = self.utils.removeDoubleSpace(span.text.strip().replace("\n", ""))
if count == 1:
authors = "author:" + data + " "
if count == 2:
journal = "journal:" + data
print title
print authors
print journal
print link
self.count += 1
self.write_db(f, "waston-paper-" + str(self.count), title, link, authors + journal)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
start = ProjectPaperSpider()
start.doWork()
|
|
""" Module for assessing impact of intervening galaxies
(DLAs) on FRB measurements
Based on calculations presented in Prochaska & Neeleman 2017
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
from scipy.interpolate import interp1d
from astropy import units as u
from frb.io import load_dla_fits
from frb.turb_scattering import Turbulence
from frb import defs
def approx_avgDM(zeval, dla_model='atan', verbose=False):
""" Calculate the average DM from intervening galaxies
This method is approximate (and fast) and accurate
    to better than 1% insofar as the analysis is correct.
From Prochaska & Neeleman 2017
Parameters
----------
zeval : float or ndarray
Redshift(s) for evaluation
    dla_model : str, optional
      Analytic model for the DLA incidence l(z); currently only 'atan'
      is supported (see _model_lz).
Returns
-------
avgDM : Quantity (depending on type of input z)
Units of pc/cm**3
"""
# Init
mlz = _model_lz(dla_model)
if isinstance(zeval, float):
flg_float = True
zeval = np.array([zeval])
else:
flg_float = False
# Error on upper bound
if np.max(zeval) > 5.:
raise IOError("Calculation is only valid to z=5")
# Calculate
zcalc = np.linspace(0., 5., 10000)
dz = np.median(zcalc-np.roll(zcalc,1))
# Load DLA fits model
dla_fits = load_dla_fits()
# Evaluate l(z)
lz = mlz['eval'](zcalc)
# Average NHI
avgNHI = _avgN_dbl_pow(dla_fits=dla_fits)
# Take ne/nH
nenH_p = dla_fits['nenH']['loglog']
nenH = nenH_p['bp'] + nenH_p['m'] * (avgNHI - 20.3)
# Integrate lz for n(z)
cumul = np.cumsum(lz * dz)
# Average <z>
avgz = np.cumsum(zcalc * lz * dz) / cumul
'''
# <DM> for a single DLA (rest-frame)
DM_DLA = 10. ** (avgNHI + nenH) / u.cm ** 2
if verbose:
print("DM for an average DLA = {} (rest-frame)".format(DM_DLA.to('pc/cm**3')))
'''
# Altogether now
avgDM_values = 10 ** avgNHI * 10 ** nenH * cumul / (1 + avgz) #/ u.cm ** 2
# Finish up
DM_values = np.zeros_like(zeval)
for kk,iz in enumerate(zeval):
iminz = np.argmin(np.abs(iz - zcalc))
DM_values[kk] = avgDM_values[iminz]
# Return
return (DM_values / u.cm**2).to('pc/cm**3')
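# A minimal usage sketch (illustrative only; the import path and z values below
# are assumptions, not taken from this module):
#
#   >>> # from frb.dla import approx_avgDM
#   >>> avgDM = approx_avgDM(1.0)                      # Quantity in pc / cm**3
#   >>> avgDM_grid = approx_avgDM(np.array([0.5, 1.0, 2.0]))
#
# Only z <= 5 is supported (see the explicit check above); for array input the
# returned values are taken from the nearest points of the internal z grid.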
def monte_DM(zeval, model='atan', nrand=100, verbose=False):
"""
Parameters
----------
zeval : float or ndarray
Array of redshifts for evaluation
    model : str, optional
      Analytic model for l(z); currently only 'atan' is supported.
nrand : int, optional
Number of samples on NHI
verbose : bool, optional
Returns
-------
rand_DM : ndarray
Random DM values
Reported in pc/cm**3 (unitless array)
"""
# Convert to array
if isinstance(zeval, float):
zeval = np.array([zeval])
# Init
dla_fits = load_dla_fits()
nenH_param = dla_fits['nenH']['loglog']
mlz = _model_lz(model)
lgNmax = np.linspace(20.3, 22., 10000)
intfN = _int_dbl_pow(dla_fits['fN']['dpow'], lgNmax=lgNmax)
# Interpolate (cubic is *very* slow)
interp_fN = interp1d(intfN/intfN[-1], lgNmax)
# l(z) model
# Evaluate l(z) in small z intervals
zmax = np.max(zeval)
z = np.linspace(0., zmax, 50000)
dz = np.median(z-np.roll(z,1))
lz = mlz['eval'](z, param=mlz['param'])
# Setup for n(z) and drawing zdla
nzc = np.cumsum(lz*dz) # Note nzc[0] is not 0
avgz = np.cumsum(z*lz*dz) / nzc
interp_avgz = interp1d(z, avgz)
nzc[0] = 0.
interp_nz = interp1d(z, nzc)
interp_revnz = interp1d((nzc-nzc[0])/nzc[-1], z) # Accurate to ~1%
#
rand_DM = np.zeros((nrand, zeval.size))
nz = interp_nz(zeval)
for kk,inz in enumerate(nz):
# Random number of DLAs
rn = np.random.poisson(inz, size=nrand)
ndla = np.sum(rn)
if ndla == 0:
continue
# Draw NHI
rval = np.random.uniform(size=ndla)
rNHI = interp_fN(rval)
# nenH
nenH = nenH_param['bp'] + nenH_param['m'] * (rNHI-20.3)
# Draw zdla
rval2 = np.random.uniform(size=ndla)
zdla = interp_revnz(rval2*inz/nzc[-1])
# DM values
DMi = 10.**(rNHI + nenH) / (1+zdla)
# Generate a dummy array
DMarr = np.zeros((nrand, max(rn)))
cnt = 0
for jj in range(nrand): # Fill
if rn[jj] > 0:
DMarr[jj,:rn[jj]] = DMi[cnt:cnt+rn[jj]]
cnt += rn[jj]
# Fill em up
rand_DM[:,kk] = np.sum(DMarr,axis=1)
# Return
unit_conv = (1/u.cm**2).to('pc/cm**3').value
return rand_DM * unit_conv
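# Monte Carlo usage sketch (nrand and the redshifts are hypothetical inputs):
#
#   >>> rand_DM = monte_DM(np.array([1.0, 2.0]), nrand=1000)
#   >>> rand_DM.shape        # (1000, 2); values in pc / cm**3, no astropy unit attached
#
# Each column holds nrand random sightlines to that redshift; percentiles along
# axis=0 give the sightline-to-sightline scatter around approx_avgDM().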
def monte_tau(zeval, nrand=100, nHI=0.1, avg_ne=-2.6,
sigma_ne=0.5, cosmo=None, lobs=50*u.cm, turb=None):
""" Generate random draws of tau at a series of redshifts
Parameters
----------
zeval : ndarray
Array of redshifts for evaluation
nrand : int, optional
Number of samples on NHI
avg_ne : float, optional
Average log10 electron density / cm**3
sigma_ne : float, optional
Error in log10 ne
nHI : float, optional
Fiducial value for n_HI; used for DL value
lobs : Quantity
Wavelength for analysis
turb : Turbulence object, optional
Usually defined internally and that is the highly recommended approach
cosmo : astropy.cosmology, optional
Defaults to defs.frb_cosmo
Returns
-------
rand_tau : ndarray (nrand, nz)
Random tau values reported in ms (but without explicit astropy Units)
"""
# Init
ne_param = dict(value=avg_ne, sigma=sigma_ne) # Neeleman+15
dla_fits = load_dla_fits()
if cosmo is None:
cosmo = defs.frb_cosmo
# Setup NHI
lgNmax = np.linspace(20.3, 22., 10000)
intfN = _int_dbl_pow(dla_fits['fN']['dpow'], lgNmax=lgNmax)
# Spline
interp_fN = interp1d(intfN/intfN[-1], lgNmax)#, kind='cubic')
# Setup z
zvals = np.linspace(0., 7., 10000)
nz_s = _dla_nz(zvals)
nz_s[0] = 0.
# Turbulence
if turb is None:
turb = _init_dla_turb()
f_ne=turb.ne
zsource = 2.
turb.set_rdiff(lobs)
fiducial_tau = turb.temporal_smearing(lobs, zsource)
# Take out the cosmology
f_D_S = cosmo.angular_diameter_distance(zsource)
f_D_L = cosmo.angular_diameter_distance(turb.zL)
f_D_LS = cosmo.angular_diameter_distance_z1z2(turb.zL, zsource)
fiducial_tau = fiducial_tau / f_D_LS / f_D_L * f_D_S * (1+turb.zL)**3 # ms/Mpc
kpc_cm = (1*u.kpc).to('cm').value
rand_tau = np.zeros((nrand, zeval.size))
# Loop on zeval
for ss,izeval in enumerate(zeval):
avg_nz = _dla_nz(izeval)
rn = np.random.poisson(avg_nz, size=nrand)
ndla = np.sum(rn)
if ndla == 0:
continue
# Get random NHI
rval = np.random.uniform(size=ndla)
rNHI = interp_fN(rval)
DL = 10.**rNHI / nHI / kpc_cm
# Get random z
imin = np.argmin(np.abs(zvals-izeval))
interp_z = interp1d(nz_s[0:imin]/nz_s[imin-1], zvals[0:imin])#, kind='cubic')
rval = np.random.uniform(size=ndla)
rz = interp_z(rval)
# Cosmology
D_S = cosmo.angular_diameter_distance(izeval)
D_L = cosmo.angular_diameter_distance(rz)
D_LS = cosmo.angular_diameter_distance_z1z2(rz, izeval)
# Get random n_e
rne = 10.**(ne_param['value'] + ne_param['sigma']*np.random.normal(size=ndla))
# Calculate (scale)
rtau = fiducial_tau * (D_LS * D_L / D_S) * (rne/f_ne.to('cm**-3').value)**2 / (1+rz)**3
# Generate, fill
taus = np.zeros((nrand, np.max(rn)))
kk = 0
for jj,irn in enumerate(rn):
if irn > 0:
taus[jj,0:irn] = rtau[kk:kk+irn]
kk += irn
# Finish -- add in quadrature
final_tau = np.sqrt(np.sum(taus**2, axis=1))
# Save
rand_tau[:,ss] = final_tau
# Return
return rand_tau
def _avgN_dbl_pow(lgNmin=20.3, dla_fits=None):
""" Calculate <NHI> for the double power-law
Parameters
----------
lgNmin : float, optional
Returns
-------
avglgN : float
log10 <NHI>
"""
if dla_fits is None:
dla_fits = load_dla_fits()
# Parameters
param = dla_fits['fN']['dpow']
# Calculate
fterm = 1/(param['a3']+2) - 1./(param['a4']+2)
sterm = (10**(lgNmin-param['Nd']))**(param['a3']+2) / (param['a3']+2)
# Numerator
num = (10**param['Nd'])**2 *(fterm-sterm)
# Denom
denom = _int_dbl_pow(param, lgNmin=lgNmin)
return np.log10(num/denom)
def _atan_lz(zval, param=None):
""" arctan l(z) model
Parameters
----------
zval : float or ndarray
Returns
-------
atan_lz : float or ndarray
"""
if param is None:
dfits = load_dla_fits()
param = dfits['lz']['atan']
lz = param['A'] + param['B'] * np.arctan(zval-param['C'])
return lz
def _dla_nz(zarr, mlz=None, model='atan'):
""" Calculate the number of DLAs intersected on average
to a given redshift
Parameters
----------
zarr : ndarray
mlz : model, optional
model : str, optional
Returns
-------
nz : ndarray
"""
# Load model
if mlz is None:
mlz = _model_lz(model)
z = np.linspace(0., 10., 10000)
dz = np.median(z-np.roll(z,1))
lz = mlz['eval'](z, param=mlz['param'])
# Sum
nz = np.cumsum(lz*dz)
# Interpolate onto input redshifts
interp_nz = interp1d(z, nz)
# Return
return interp_nz(zarr)
def _init_dla_turb(ne=4e-3/u.cm**3, zL=1.):
""" Initialize a Turbulence object for a fiducial DLA
Parameters
----------
ne : Quantity
Electron density
Default is based on Neeleman+15
zL : float
Redshift of the DLA
Returns
-------
turb : Turbulence object
"""
# Sizes
l0 = 1 * u.AU
L0 = 0.001 * u.pc
DL = 1 * u.kpc
# Init
turb = Turbulence(ne, l0, L0, zL)
turb.set_SM_obj(DL)
# Return
return turb
def _int_dbl_pow(param, lgNmin=20.3, lgNmax=None):
""" Integrate the double power-law for f(N)
For normalizing with l(z) and for doing random draws
Parameters
----------
lgNmin : float, optional
lgNmax : ndarray, optional
If None, integrate to Infinity
Returns
-------
val : float or ndarray
Integral of f(N) dN [modulo the j(z) term]
Really just the integral of h(N) dN
"""
# Calculate
if lgNmax is None: # Integrate to Infinity
fterm = 1/(param['a3']+1) - 1./(param['a4']+1)
else: # Indefinite integral
fterm = np.zeros_like(lgNmax)
high = lgNmax > param['Nd']
fterm[high] = 1/(param['a3']+1) - 1./(param['a4']+1)
fterm[high] += (10**(lgNmax[high]-param['Nd']))**(param['a4']+1) / (param['a4']+1)
fterm[~high] = (10**(lgNmax[~high]-param['Nd']))**(param['a3']+1) / (param['a3']+1)
# Nmin term
sterm = (10**(lgNmin-param['Nd']))**(param['a3']+1) / (param['a3']+1)
# Finish
val = 10**param['Nd'] * (fterm-sterm)
return val
def _model_lz(name):
""" Return the model for l(z)
Enables multiple ways to model the DLA observations
Returns
-------
mlz : dict
"""
# Fit parameters
dfits = load_dla_fits()
#
mlz = dict(name=name)
if name == 'atan':
mlz['param'] = dfits['lz']['atan']
mlz['eval'] = _atan_lz
else:
raise IOError("Bad lz model name!!")
return mlz
|
|
# CASpy - Python implementation of DVB Simulcrypt CAS
# Currently, only the EMM Generator (EMMG) is implemented.
# Implements the DVB Simulcrypt EMMG <-> MUX messages (TLV-based) defined below.
import binascii
import socket
import ctypes
from time import sleep
import math
# Utility functions, to allow for serialization of the ctypes.Structure classes
def serialize(ctypesObj):
"""
FAQ: How do I copy bytes to Python from a ctypes.Structure?
"""
return buffer(ctypesObj)[:]
def deserialize(ctypesObj, inputBytes):
"""
FAQ: How do I copy bytes to a ctypes.Structure from Python?
"""
fit = min(len(inputBytes), ctypes.sizeof(ctypesObj))
ctypes.memmove(ctypes.addressof(ctypesObj), inputBytes, fit)
INPUT_BUFFER_LENGTH = 1024 # Units : [Bytes]. Input buffer size of received TCP messages
SEND_EMM_PERIOD_TIME = 50e-3 # Units : [Number of Seconds] / [IP packet]
SEND_EMM_FREQUENCY = 1.0 / SEND_EMM_PERIOD_TIME # Units : [Number of IP Packets] / [Sec]
BYTE_TO_BIT_SCALE = 8
KBPS_TO_BPS_SCALE = 1000.0
BPS_TO_KBPS_SCALE = 0.001
#EMM_DUMMY_DATA = binascii.unhexlify('01B040FFFFC7000009160500E03213012014030078001403007C0014030FFF4009040602E0C109040D01E1F409110500E0AB130120140300E800140300E8107A38F9E2')
# ****************************************************
# DVB Simulcrypt messages semantic
# ****************************************************
simulcrypt_protocol_version = 0x02
# Parameters type value according to Simulcrypt standard, 6.22 Table 5 - Parameters :
client_id_type = 0x0001
client_id_length = 4
section_TSpkt_flag_type = 0x0002
section_TSpkt_flag_length = 1
data_channel_id_type = 0x0003
data_channel_id_length = 2
data_stream_id_type = 0x0004
data_stream_id_length = 2
datagram_type = 0x0005 # Parameter length : Variable
bandwidth_type = 0x0006 # Units: kbit/s
bandwidth_length = 2
data_type_type = 0x0007
data_type_length = 1
data_id_type = 0x0008
data_id_length = 2
error_status_type = 0x7000 # See clause 6.2.6
error_status_length = 2
# According to 6.4, 6.5 - Messages :
Channel_setup_message_Type = 0x0011
Channel_test_message_Type = 0x0012
Channel_status_message_Type = 0x0013
Channel_close_message_Type = 0x0014
Channel_error_message_Type = 0x0015
Stream_setup_message_Type = 0x0111
Stream_test_message_Type = 0x0112
Stream_status_message_Type = 0x0113
Stream_close_request_message_Type = 0x0114
Stream_close_response_message_Type = 0x0115
Stream_error_message_Type = 0x0116
Stream_BW_request_message_Type = 0x0117
Stream_BW_allocation_message_Type = 0x0118
Data_provision_messageType = 0x0211
# ****************************************************
# SIMULCRYPT TLV CLASS
# ****************************************************
class CMessageHeader (ctypes.BigEndianStructure):
"""
message_header
"""
_pack_ = 1
_length_ = 5
_fields_ = [("protocol_version", ctypes.c_ubyte),
("message_type", ctypes.c_ushort),
("message_length", ctypes.c_ushort)]
class CTypeLength (ctypes.BigEndianStructure):
"""
Type Length, of the TLV struct
"""
_pack_ = 1
_length_ = 4
_fields_ = [("param_type", ctypes.c_ushort),
("param_length", ctypes.c_ushort)]
class CByteParam (ctypes.BigEndianStructure):
"""
One byte unsigned parameter
"""
_pack_ = 1
_length_ = 5
_fields_ = [("param_type", ctypes.c_ushort),
("param_length", ctypes.c_ushort),
("param_value", ctypes.c_ubyte)]
class CShortParam (ctypes.BigEndianStructure):
"""
Two byte unsigned parameter
"""
_pack_ = 1
_length_ = 6
_fields_ = [("param_type", ctypes.c_ushort),
("param_length", ctypes.c_ushort),
("param_value", ctypes.c_ushort)]
class CLongParam (ctypes.BigEndianStructure):
"""
Four byte unsigned parameter
"""
_pack_ = 1
_length_ = 8
_fields_ = [("param_type", ctypes.c_ushort),
("param_length", ctypes.c_ushort),
("param_value", ctypes.c_ulong)]
# ****************************************************
# EMMG CLASS
# ****************************************************
class CEMMG:
"""
Template for EMMG Simulator
"""
def __init__( self,
client_id, # #1
section_TSpkt_flag, # #2
data_channel_id, # #3
data_stream_id, # #4
bandwidth, # #5 # Unit : Kbps
data_id, # #6
data_type, # #7
inputFile): # #8
"""
These 7 parameters (excluding the actual data) form a single EMM stream.
One channel, One stream, One data id .
"""
# #1
self.client_id = CLongParam(client_id_type,
client_id_length,
client_id)
# #2
self.section_TSpkt_flag = CByteParam(section_TSpkt_flag_type,
section_TSpkt_flag_length,
section_TSpkt_flag)
# #3
self.data_channel_id = CShortParam(data_channel_id_type,
data_channel_id_length,
data_channel_id)
# #4
self.data_stream_id = CShortParam(data_stream_id_type,
data_stream_id_length,
data_stream_id)
# #6
self.data_id = CShortParam(data_id_type,
data_id_length,
data_id)
# #7
self.data_type = CByteParam(data_type_type,
data_type_length,
data_type)
#8
#Initialize members
self.Section = 0
self.IpEmmSectionPerCycle = 0
self.IpEmmSection = 0
self.IpEmmSectionLen = 0
self.dataTypeLength = 0
self._buildIpEmm_(inputFile, bandwidth)
#5
#Initialize members
self.minimumBPS = 0
self.ReqBWfromMux = 0
self.requestBW = 0
self._buildReqBWfromMux_()
# DEBUG PRINTS
        print 'Section file \"%s\" length %d, will be sent at %d mSec per cycle ' % (inputFile, len(self.Section), SEND_EMM_PERIOD_TIME * 1000)  # seconds -> mSec
print 'Minimum bitrate for this section (one section per cycle) : %.3f Kbps ' % (self.minimumBPS * BPS_TO_KBPS_SCALE)
print 'User requested bandwidth %d Kbps, which is %d sections per cycle, %.3f Kbps ' % (bandwidth, self.IpEmmSectionPerCycle, (self.minimumBPS * self.IpEmmSectionPerCycle * BPS_TO_KBPS_SCALE))
print 'Actual Requested Bandwidth from Multiplexer will be %d Kbps (%d sections per cycle)' % (self.ReqBWfromMux, (self.IpEmmSectionPerCycle + 1))
def _buildIpEmm_ (self, inputFile, bandwidth):
"""
Build data segment sent to TCP each SEND_EMM_PERIOD_TIME seconds
EMMG sends EMMs every fixed cycle time : SEND_EMM_PERIOD_TIME
Hence, the actual bitrate is determined by the number of sections being sent each time.
Calculation of the number of EMM sections each cycle is done in EmmSecPerCycle
"""
fd = open(inputFile, 'rb')
self.Section = fd.read()
fd.close()
self.IpEmmSectionPerCycle = self._calcNumberSectionsPerCycle_(bandwidth, len(self.Section))
self.IpEmmSection = self.Section * self.IpEmmSectionPerCycle
self.IpEmmSectionLen = len(self.IpEmmSection)
self.dataTypeLength = CTypeLength (datagram_type,
self.IpEmmSectionLen)
def _buildReqBWfromMux_ (self):
"""
Build requested BW from Multiplexer
"""
self.minimumBPS = SEND_EMM_FREQUENCY * len(self.Section) * BYTE_TO_BIT_SCALE
self.ReqBWfromMux = int( self.minimumBPS * (self.IpEmmSectionPerCycle + 1) * BPS_TO_KBPS_SCALE )
self.requestBW = CShortParam(bandwidth_type,
bandwidth_length,
self.ReqBWfromMux)
def _calcNumberSectionsPerCycle_ (self, requiredBW, sectionLength):
"""
Calculate the number of sections needed to be transmitted each cycle of SEND_EMM_PERIOD_TIME seconds,
given requiredBW in Kbps
"""
SectionPerCycle = ((requiredBW * KBPS_TO_BPS_SCALE) * SEND_EMM_PERIOD_TIME) / (sectionLength * BYTE_TO_BIT_SCALE)
SectionPerCycle = int (math.ceil(SectionPerCycle)) # Round up and cast to int the number of EMM sections being transmitted per cycle
return SectionPerCycle
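    # Worked example for the calculation above (numbers are hypothetical):
    #   requiredBW = 20 Kbps, sectionLength = 64 bytes
    #   bits per cycle   = 20 * 1000 * 0.05  = 1000
    #   bits per section = 64 * 8            = 512
    #   SectionPerCycle  = ceil(1000 / 512)  = 2
    # so _buildIpEmm_ would concatenate two copies of the section into each
    # data provision message.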
def updateIpEmm (self, inputFile, requiredBW):
"""
Update IP Emm length according to new requiredBW and new file.
This method DOES NOT negotiate with Mux new BW Threshold, if requiredBW exceeds present threshold
"""
self._buildIpEmm_(inputFile, requiredBW)
def prepare_channel_setup_Msg (self):
"""
Prepares and packs the emm_channel_setup message
"""
msgBody = serialize(self.client_id) + \
serialize(self.data_channel_id) + \
serialize(self.section_TSpkt_flag)
msgHeader = serialize(CMessageHeader (simulcrypt_protocol_version, # DVB simulcrypt protocol : 0x02
Channel_setup_message_Type,
len(msgBody))) # Calculate total message length
totalMsg = msgHeader + msgBody
return totalMsg
def prepare_stream_setup_Msg (self):
"""
Prepares and packs the emm_stream_setup message
"""
msgBody = serialize(self.client_id) + \
serialize(self.data_channel_id) + \
serialize(self.data_stream_id) + \
serialize(self.data_id) + \
serialize(self.data_type)
msgHeader = serialize(CMessageHeader (simulcrypt_protocol_version, # DVB simulcrypt protocol : 0x02
Stream_setup_message_Type,
len(msgBody))) # Calculate total message length
totalMsg = msgHeader + msgBody
return totalMsg
def prepare_stream_BW_request_Msg (self):
"""
Prepares and packs the stream bandwidth request message
"""
msgBody = serialize(self.client_id) + \
serialize(self.data_channel_id) + \
serialize(self.data_stream_id) + \
serialize(self.requestBW)
msgHeader = serialize(CMessageHeader (simulcrypt_protocol_version, # DVB simulcrypt protocol : 0x02
Stream_BW_request_message_Type,
len(msgBody))) # Calculate total message length
totalMsg = msgHeader + msgBody
return totalMsg
def prepare_Provision_Data_Msg (self):
"""
Prepares and packs the Provision Data message
"""
msgBody = serialize(self.client_id) + \
serialize(self.data_channel_id) + \
serialize(self.data_stream_id) + \
serialize(self.data_id) + \
serialize(self.dataTypeLength) + \
self.IpEmmSection
msgHeader = serialize(CMessageHeader (simulcrypt_protocol_version, # DVB simulcrypt protocol : 0x02
Data_provision_messageType,
len(msgBody))) # Calculate total message length
totalMsg = msgHeader + msgBody
return totalMsg
def receiveMessage(self, strMsg):
"""
Parse buffer received from mux
"""
header = CMessageHeader(0,0,0)
paramTL = CTypeLength (0,0) # Parameter Type-Length values (from the trio TYPE-LENGTH-VALUE)
longParam = CLongParam (0,0,0)
shortParam = CShortParam (0,0,0)
byteParam = CByteParam (0,0,0)
JUSTIFY_LEN = 25
# Parse header
deserialize (header, strMsg)
if header.message_type == Channel_setup_message_Type :
print "Message type Channel_setup_message "
elif header.message_type == Channel_test_message_Type :
print "Message type Channel_test_message "
elif header.message_type == Channel_status_message_Type :
print "Message type Channel_status_message "
elif header.message_type == Channel_close_message_Type :
print "Message type Channel_close_message "
elif header.message_type == Channel_error_message_Type :
print "Message type Channel_error_message "
elif header.message_type == Stream_setup_message_Type :
print "Message type Stream_setup_message "
elif header.message_type == Stream_test_message_Type :
print "Message type Stream_test_message "
elif header.message_type == Stream_status_message_Type :
print "Message type Stream_status_message "
elif header.message_type == Stream_close_request_message_Type :
print "Message type Stream_close_request_message "
elif header.message_type == Stream_close_response_message_Type :
print "Message type Stream_close_response_message "
elif header.message_type == Stream_error_message_Type :
print "Message type Stream_error_message "
elif header.message_type == Stream_BW_request_message_Type :
print "Message type Stream_BW_request_message "
elif header.message_type == Stream_BW_allocation_message_Type :
print "Message type Stream_BW_allocation_message "
elif header.message_type == Data_provision_messageType :
print "Message type Data_provision_message "
print "Message Length : ".ljust(JUSTIFY_LEN), header.message_length
# Parse message body. Iterate on TLV structures
strMsg = strMsg[CMessageHeader._length_ : ]
while len(strMsg) > 0 :
deserialize(paramTL, strMsg)
if paramTL.param_type == client_id_type :
deserialize (longParam, strMsg)
print "Client id : ".ljust(JUSTIFY_LEN), "0x%X" % longParam.param_value
strMsg = strMsg [CLongParam._length_ : ]
elif paramTL.param_type == section_TSpkt_flag_type :
deserialize (byteParam, strMsg)
print "section TSpkt flag : ".ljust(JUSTIFY_LEN), byteParam.param_value
strMsg = strMsg [CByteParam._length_ : ]
elif paramTL.param_type == data_channel_id_type :
deserialize (shortParam, strMsg)
print "data channel id : ".ljust(JUSTIFY_LEN), shortParam.param_value
strMsg = strMsg [CShortParam._length_ : ]
elif paramTL.param_type == data_stream_id_type :
deserialize (shortParam, strMsg)
print "data stream id : ".ljust(JUSTIFY_LEN), shortParam.param_value
strMsg = strMsg [CShortParam._length_ : ]
elif paramTL.param_type == bandwidth_type :
deserialize (shortParam, strMsg)
print "bandwidth : ".ljust(JUSTIFY_LEN), shortParam.param_value
strMsg = strMsg [CShortParam._length_ : ]
elif paramTL.param_type == data_type_type :
deserialize (byteParam, strMsg)
print "data type : ".ljust(JUSTIFY_LEN), byteParam.param_value
strMsg = strMsg [CByteParam._length_ : ]
elif paramTL.param_type == data_id_type :
deserialize (shortParam, strMsg)
print "data id : ".ljust(JUSTIFY_LEN), shortParam.param_value
strMsg = strMsg [CShortParam._length_ : ]
elif paramTL.param_type == error_status_type :
deserialize (shortParam, strMsg)
print "error status : ".ljust(JUSTIFY_LEN), shortParam.param_value
strMsg = strMsg [CShortParam._length_ : ]
if __name__ == '__main__':
EMM_INPUT_FILE = r'D:\EmmgSimulator\section'
#--- Prepare EMMG
EMMG1 = CEMMG ( client_id = 0x00099999,
section_TSpkt_flag = 0x0,
data_channel_id = 0x1,
data_stream_id = 0x32,
bandwidth = 20, # Units : Kbps. bandwidth request
data_id = 0x1,
data_type = 0x1,
inputFile = EMM_INPUT_FILE)
#--- Connect to Mux EMM TCP SERVER
muxEmmSocket = socket.socket()
host = '10.40.2.195'
port = 20000
muxEmmSocket.connect ((host, port))
print
print " CHANNEL SETUP"
print " *************"
#--- Send Channel Setup Message
sendMsg = EMMG1.prepare_channel_setup_Msg()
    muxEmmSocket.send(sendMsg)
#--- Get Channel Status Message
muxEmmMsg = muxEmmSocket.recv(INPUT_BUFFER_LENGTH)
EMMG1.receiveMessage(muxEmmMsg)
print
print " STREAM SETUP"
print " ************"
#--- Send Stream Setup Message
sendMsg = EMMG1.prepare_stream_setup_Msg()
    muxEmmSocket.send(sendMsg)
#--- Get Stream Status Message
muxEmmMsg = muxEmmSocket.recv(INPUT_BUFFER_LENGTH)
EMMG1.receiveMessage(muxEmmMsg)
print
print " Stream BW Allocation"
print " ********************"
#--- Send stream BW request Message
sendMsg = EMMG1.prepare_stream_BW_request_Msg()
    muxEmmSocket.send(sendMsg)
#--- Get Stream BW allocation Message
muxEmmMsg = muxEmmSocket.recv(INPUT_BUFFER_LENGTH)
EMMG1.receiveMessage(muxEmmMsg)
print
print " Send provision Data"
print " *******************"
#--- Send stream BW request Message
sendMsg = EMMG1.prepare_Provision_Data_Msg()
counter = 0
while True :
# Send TCP message with the required EMM
        muxEmmSocket.send(sendMsg)
counter += 1
# Change EMM bitrate
if counter == 400:
EMMG1.updateIpEmm(EMM_INPUT_FILE, 60)
sendMsg = EMMG1.prepare_Provision_Data_Msg()
# Change EMM bitrate
elif counter == 800:
EMMG1.updateIpEmm(EMM_INPUT_FILE, 150)
sendMsg = EMMG1.prepare_Provision_Data_Msg()
# Change EMM bitrate
elif counter == 1200:
EMMG1.updateIpEmm(EMM_INPUT_FILE, 20)
sendMsg = EMMG1.prepare_Provision_Data_Msg()
# Wait for SEND_EMM_PERIOD_TIME before sending again
sleep (SEND_EMM_PERIOD_TIME)
# Print statistics
if counter == 8000:
break
elif (counter % 80) == 0 :
print "Sent " , counter, " packets."
print "Message length : %d" % len(sendMsg)
|
|
"""
"""
import sys
import traceback
from functools import wraps
from PyQt4.QtGui import (
QWidget, QPlainTextEdit, QVBoxLayout, QTextCursor, QTextCharFormat,
QFont, QSizePolicy
)
from PyQt4.QtCore import Qt, QObject, QEvent, QCoreApplication, QThread, QSize
from PyQt4.QtCore import pyqtSignal as Signal
class TerminalView(QPlainTextEdit):
def __init__(self, *args, **kwargs):
QPlainTextEdit.__init__(self, *args, **kwargs)
self.setFrameStyle(QPlainTextEdit.NoFrame)
self.setTextInteractionFlags(Qt.TextBrowserInteraction)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
font = self.font()
if hasattr(QFont, "Monospace"):
            # QFont.Monospace is not available in some older Qt builds (e.g. Debian squeeze)
font.setStyleHint(QFont.Monospace)
else:
font.setStyleHint(QFont.Courier)
font.setFamily("Monaco")
font.setPointSize(12)
self.setFont(font)
def sizeHint(self):
metrics = self.fontMetrics()
width = metrics.boundingRect("_" * 81).width()
height = metrics.lineSpacing()
scroll_width = self.verticalScrollBar().width()
size = QSize(width + scroll_width, height * 25)
return size
class OutputView(QWidget):
def __init__(self, parent=None, **kwargs):
QWidget.__init__(self, parent, **kwargs)
self.__lines = 5000
self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self.__text = TerminalView()
self.__currentCharFormat = self.__text.currentCharFormat()
self.layout().addWidget(self.__text)
def setMaximumLines(self, lines):
"""
Set the maximum number of lines to keep displayed.
"""
if self.__lines != lines:
self.__lines = lines
self.__text.setMaximumBlockCount(lines)
def maximumLines(self):
"""
Return the maximum number of lines in the display.
"""
return self.__lines
def clear(self):
"""
Clear the displayed text.
"""
self.__text.clear()
def setCurrentCharFormat(self, charformat):
"""Set the QTextCharFormat to be used when writing.
"""
if self.__currentCharFormat != charformat:
self.__currentCharFormat = charformat
def currentCharFormat(self):
return self.__currentCharFormat
def toPlainText(self):
"""
Return the full contents of the output view.
"""
return self.__text.toPlainText()
# A file like interface.
def write(self, string):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(self.__currentCharFormat)
self.__text.insertPlainText(string)
def writelines(self, lines):
self.write("".join(lines))
def flush(self):
pass
def writeWithFormat(self, string, charformat):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(charformat)
self.__text.insertPlainText(string)
def writelinesWithFormat(self, lines, charformat):
self.writeWithFormat("".join(lines), charformat)
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
        Return a formatted, file-like object proxy.
"""
charformat = update_char_format(
self.currentCharFormat(), color, background, weight,
italic, underline, font
)
return formater(self, charformat)
def update_char_format(baseformat, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
Return a copy of `baseformat` :class:`QTextCharFormat` with
updated color, weight, background and font properties.
"""
charformat = QTextCharFormat(baseformat)
if color is not None:
charformat.setForeground(color)
if background is not None:
charformat.setBackground(background)
if font is not None:
charformat.setFont(font)
else:
font = update_font(baseformat.font(), weight, italic, underline)
charformat.setFont(font)
return charformat
def update_font(basefont, weight=None, italic=None, underline=None,
pixelSize=None, pointSize=None):
"""
Return a copy of `basefont` :class:`QFont` with updated properties.
"""
font = QFont(basefont)
if weight is not None:
font.setWeight(weight)
if italic is not None:
font.setItalic(italic)
if underline is not None:
font.setUnderline(underline)
if pixelSize is not None:
font.setPixelSize(pixelSize)
if pointSize is not None:
font.setPointSize(pointSize)
return font
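# Usage sketch (the colour is an arbitrary example; a QColor import is assumed):
#
#   view = OutputView()
#   err_writer = view.formated(color=QColor("red"))
#   err_writer.write("something went wrong\n")
#
# update_char_format()/update_font() only copy and tweak the base format, so the
# view's own currentCharFormat() is left untouched.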
class formater(object):
def __init__(self, outputview, charformat):
self.outputview = outputview
self.charformat = charformat
def write(self, string):
self.outputview.writeWithFormat(string, self.charformat)
def writelines(self, lines):
        self.outputview.writelinesWithFormat(lines, self.charformat)
def flush(self):
self.outputview.flush()
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
charformat = update_char_format(self.charformat, color, background,
weight, italic, underline, font)
return formater(self.outputview, charformat)
def __enter__(self):
return self
def __exit__(self, *args):
self.outputview = None
self.charformat = None
class QueuedCallEvent(QEvent):
QueuedCall = QEvent.registerEventType()
def __init__(self, function, args, kwargs):
QEvent.__init__(self, QueuedCallEvent.QueuedCall)
self.function = function
self.args = args
self.kwargs = kwargs
self._result = None
self._exc_info = None
self._state = 0
def call(self):
try:
self._result = self.function(*self.args, **self.kwargs)
self._state = 1
except Exception as ex:
self._exc_info = (type(ex), ex.args, None)
raise
def result(self):
if self._state == 1:
return self._result
elif self._exc_info:
            raise self._exc_info[0](*self._exc_info[1])
else:
# Should this block, add timeout?
raise RuntimeError("Result not yet ready")
def isready(self):
return self._state == 1 or self._exc_info
def queued(method):
"""
Run method from the event queue.
"""
@wraps(method)
def delay_method_call(self, *args, **kwargs):
event = QueuedCallEvent(method.__get__(self), args, kwargs)
QCoreApplication.postEvent(self, event)
return delay_method_call
def queued_blocking(method):
"""
Run method from the event queue and wait until the event is processed.
Return the call's return value.
"""
@wraps(method)
def delay_method_call(self, *args, **kwargs):
event = QueuedCallEvent(method, (self,) + args, kwargs)
QCoreApplication.postEvent(self, event)
QCoreApplication.sendPostedEvents()
return event.result()
return delay_method_call
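# Usage note: methods decorated with @queued do not run in the caller's thread;
# the call is wrapped in a QueuedCallEvent and posted to the receiving QObject,
# which executes it in customEvent() (see TextStream below). For example, a
# worker thread holding a TextStream connected to an OutputView can simply do
#
#   stream.write("progress...\n")   # executed later, in the object's own thread
#
# queued_blocking posts the event and immediately asks for the result, so the
# caller should be prepared for result() to raise RuntimeError if the event has
# not been processed yet (e.g. when the receiver lives in another thread).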
class TextStream(QObject):
stream = Signal(str)
flushed = Signal()
def __init__(self, parent=None):
QObject.__init__(self, parent)
@queued
def write(self, string):
self.stream.emit(string)
@queued
def writelines(self, lines):
self.stream.emit("".join(lines))
@queued
def flush(self):
self.flushed.emit()
def customEvent(self, event):
if event.type() == QueuedCallEvent.QueuedCall:
event.call()
event.accept()
class ExceptHook(QObject):
handledException = Signal()
def __init__(self, parent=None, stream=None):
QObject.__init__(self, parent)
self.stream = stream
def __call__(self, exc_type, exc_value, tb):
text = traceback.format_exception(exc_type, exc_value, tb)
separator = "-" * 80 + "\n"
if QThread.currentThread() != QCoreApplication.instance().thread():
header = exc_type.__name__ + " (in non GUI thread)"
else:
header = exc_type.__name__
header_fmt = "%%%is\n"
if tb:
header += (header_fmt % (80 - len(header))) % text[0].strip()
del text[0]
        else:
            header += "\n"
if self.stream is None:
stream = sys.stderr
else:
stream = self.stream
stream.writelines([separator, header] + text)
self.handledException.emit()
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating instances locally.
You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
import random
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.objects import instance_group as instance_group_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.pci import pci_request
from nova import rpc
from nova.scheduler import driver
from nova.scheduler import scheduler_options
from nova.scheduler import utils as scheduler_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
filter_scheduler_opts = [
cfg.IntOpt('scheduler_host_subset_size',
default=1,
help='New instances will be scheduled on a host chosen '
'randomly from a subset of the N best hosts. This '
'property defines the subset size that a host is '
'chosen from. A value of 1 chooses the '
'first host returned by the weighing functions. '
'This value must be at least 1. Any value less than 1 '
'will be ignored, and 1 will be used instead')
]
CONF.register_opts(filter_scheduler_opts)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.options = scheduler_options.SchedulerOptions()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.notifier = rpc.get_notifier('scheduler')
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties, legacy_bdm_in_spec):
"""This method is called from nova.compute.api to provision
an instance. We first create a build plan (a list of WeightedHosts)
and then provision.
Returns a list of the instances created.
"""
payload = dict(request_spec=request_spec)
self.notifier.info(context, 'scheduler.run_instance.start', payload)
instance_uuids = request_spec.get('instance_uuids')
LOG.info(_("Attempting to build %(num_instances)d instance(s) "
"uuids: %(instance_uuids)s"),
{'num_instances': len(instance_uuids),
'instance_uuids': instance_uuids})
LOG.debug(_("Request Spec: %s") % request_spec)
weighed_hosts = self._schedule(context, request_spec,
filter_properties, instance_uuids)
# NOTE: Pop instance_uuids as individual creates do not need the
# set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
instance_uuids = request_spec.pop('instance_uuids')
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
filter_properties.pop('context', None)
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
try:
try:
weighed_host = weighed_hosts.pop(0)
LOG.info(_("Choosing host %(weighed_host)s "
"for instance %(instance_uuid)s"),
{'weighed_host': weighed_host,
'instance_uuid': instance_uuid})
except IndexError:
raise exception.NoValidHost(reason="")
self._provision_resource(context, weighed_host,
request_spec,
filter_properties,
requested_networks,
injected_files, admin_password,
is_first_time,
instance_uuid=instance_uuid,
legacy_bdm_in_spec=legacy_bdm_in_spec)
except Exception as ex:
# NOTE(vish): we don't reraise the exception here to make sure
# that all instances in the request get set to
# error properly
driver.handle_schedule_error(context, ex, instance_uuid,
request_spec)
# scrub retry host list in case we're scheduling multiple
# instances:
retry = filter_properties.get('retry', {})
retry['hosts'] = []
self.notifier.info(context, 'scheduler.run_instance.end', payload)
def select_destinations(self, context, request_spec, filter_properties):
"""Selects a filtered set of hosts and nodes."""
num_instances = request_spec['num_instances']
instance_uuids = request_spec.get('instance_uuids')
selected_hosts = self._schedule(context, request_spec,
filter_properties, instance_uuids)
# Couldn't fulfill the request_spec
if len(selected_hosts) < num_instances:
raise exception.NoValidHost(reason='')
dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
limits=host.obj.limits) for host in selected_hosts]
return dests
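    # Shape of the returned list (host names are purely illustrative):
    #   [{'host': 'compute-1', 'nodename': 'node-1', 'limits': {...}},
    #    {'host': 'compute-7', 'nodename': 'node-7', 'limits': {...}}]
    # i.e. one entry per requested instance, each naming the chosen host/node
    # and any resource limits reported by the host state.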
def _provision_resource(self, context, weighed_host, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance_uuid=None,
legacy_bdm_in_spec=True):
"""Create the requested resource in this Zone."""
# NOTE(vish): add our current instance back into the request spec
request_spec['instance_uuids'] = [instance_uuid]
payload = dict(request_spec=request_spec,
weighted_host=weighed_host.to_dict(),
instance_id=instance_uuid)
self.notifier.info(context,
'scheduler.run_instance.scheduled', payload)
# Update the metadata if necessary
scheduler_hints = filter_properties.get('scheduler_hints') or {}
try:
updated_instance = driver.instance_update_db(context,
instance_uuid)
except exception.InstanceNotFound:
LOG.warning(_("Instance disappeared during scheduling"),
context=context, instance_uuid=instance_uuid)
else:
scheduler_utils.populate_filter_properties(filter_properties,
weighed_host.obj)
self.compute_rpcapi.run_instance(context,
instance=updated_instance,
host=weighed_host.obj.host,
request_spec=request_spec,
filter_properties=filter_properties,
requested_networks=requested_networks,
injected_files=injected_files,
admin_password=admin_password, is_first_time=is_first_time,
node=weighed_host.obj.nodename,
legacy_bdm_in_spec=legacy_bdm_in_spec)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def populate_filter_properties(self, request_spec, filter_properties):
"""Stuff things into filter_properties. Can be overridden in a
subclass to add more data.
"""
# Save useful information from the request spec for filter processing:
project_id = request_spec['instance_properties']['project_id']
os_type = request_spec['instance_properties']['os_type']
filter_properties['project_id'] = project_id
filter_properties['os_type'] = os_type
pci_requests = pci_request.get_pci_requests_from_flavor(
request_spec.get('instance_type') or {})
if pci_requests:
filter_properties['pci_requests'] = pci_requests
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.NovaException(_("Invalid value for "
"'scheduler_max_attempts', must be >= 1"))
return max_attempts
def _log_compute_error(self, instance_uuid, retry):
"""If the request contained an exception from a previous compute
build/resize operation, log it to aid debugging
"""
exc = retry.pop('exc', None) # string-ified exception from compute
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host, last_node = hosts[-1]
LOG.error(_('Error from last host: %(last_host)s (node %(last_node)s):'
' %(exc)s'),
{'last_host': last_host,
'last_node': last_node,
'exc': exc},
instance_uuid=instance_uuid)
def _populate_retry(self, filter_properties, instance_properties):
"""Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self._max_attempts()
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
if max_attempts == 1 or force_hosts or force_nodes:
# re-scheduling is disabled.
return
retry = filter_properties.pop('retry', {})
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of compute hosts tried
}
filter_properties['retry'] = retry
instance_uuid = instance_properties.get('uuid')
self._log_compute_error(instance_uuid, retry)
if retry['num_attempts'] > max_attempts:
msg = (_('Exceeded max scheduling attempts %(max_attempts)d for '
'instance %(instance_uuid)s')
% {'max_attempts': max_attempts,
'instance_uuid': instance_uuid})
raise exception.NoValidHost(reason=msg)
@staticmethod
def _setup_instance_group(context, filter_properties):
update_group_hosts = False
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group', None)
if group_hint:
group = instance_group_obj.InstanceGroup.get_by_hint(context,
group_hint)
policies = set(('anti-affinity', 'affinity'))
if any((policy in policies) for policy in group.policies):
update_group_hosts = True
filter_properties.setdefault('group_hosts', set())
user_hosts = set(filter_properties['group_hosts'])
group_hosts = set(group.get_hosts(context))
filter_properties['group_hosts'] = user_hosts | group_hosts
filter_properties['group_policies'] = group.policies
return update_group_hosts
def _schedule(self, context, request_spec, filter_properties,
instance_uuids=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
update_group_hosts = self._setup_instance_group(context,
filter_properties)
config_options = self._get_configuration_options()
# check retry policy. Rather ugly use of instance_uuids[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
properties = instance_properties.copy()
if instance_uuids:
properties['uuid'] = instance_uuids[0]
self._populate_retry(filter_properties, properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'instance_type': instance_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by repeatedly
# filtering and weighing our options. Each time we choose a
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(elevated)
selected_hosts = []
if instance_uuids:
num_instances = len(instance_uuids)
else:
num_instances = request_spec.get('num_instances', 1)
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties, index=num)
if not hosts:
# Can't get any more locally.
break
LOG.debug(_("Filtered %(hosts)s"), {'hosts': hosts})
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
LOG.debug(_("Weighed %(hosts)s"), {'hosts': weighed_hosts})
scheduler_host_subset_size = CONF.scheduler_host_subset_size
if scheduler_host_subset_size > len(weighed_hosts):
scheduler_host_subset_size = len(weighed_hosts)
if scheduler_host_subset_size < 1:
scheduler_host_subset_size = 1
chosen_host = random.choice(
weighed_hosts[0:scheduler_host_subset_size])
selected_hosts.append(chosen_host)
# Now consume the resources so the filter/weights
# will change for the next instance.
chosen_host.obj.consume_from_instance(instance_properties)
if update_group_hosts is True:
filter_properties['group_hosts'].add(chosen_host.obj.host)
return selected_hosts
def _get_all_host_states(self, context):
"""Template method, so a subclass can implement caching."""
return self.host_manager.get_all_host_states(context)
|
|
"""
<Module Name>
functions.py
<Author>
Santiago Torres-Arias <santiago@nyu.edu>
<Started>
Nov 15, 2017
<Copyright>
See LICENSE for licensing information.
<Purpose>
  Publicly-usable functions for exporting public keys, signing data and
verifying signatures.
"""
import logging
import time
from securesystemslib import exceptions
from securesystemslib import formats
from securesystemslib.gpg.common import (
get_pubkey_bundle, parse_signature_packet)
from securesystemslib.gpg.exceptions import (
CommandError, KeyExpirationError)
from securesystemslib.gpg.constants import (
FULLY_SUPPORTED_MIN_VERSION,
GPG_EXPORT_PUBKEY_COMMAND,
GPG_SIGN_COMMAND,
HAVE_GPG,
NO_GPG_MSG,
SHA256)
from securesystemslib.gpg.handlers import (
SIGNATURE_HANDLERS)
from securesystemslib import process
from securesystemslib.gpg.rsa import CRYPTO
log = logging.getLogger(__name__)
NO_CRYPTO_MSG = "GPG support requires the cryptography library"
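# Usage sketch for create_signature() below (keyid and homedir are placeholders):
#
#   signature = create_signature(
#       b"datadatadata",
#       keyid="8465a1e2e0fb2b40adb2478e18fb3f537e0c8a17",
#       homedir="/path/to/.gnupg")
#
# The returned dict matches securesystemslib.formats.GPG_SIGNATURE_SCHEMA and
# can later be checked against the exported public key bundle.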
def create_signature(content, keyid=None, homedir=None):
"""
<Purpose>
Calls the gpg command line utility to sign the passed content with the key
identified by the passed keyid from the gpg keyring at the passed homedir.
The executed base command is defined in
securesystemslib.gpg.constants.GPG_SIGN_COMMAND.
NOTE: On GPG versions that are not fully supported, i.e. versions below
securesystemslib.gpg.constants.FULLY_SUPPORTED_MIN_VERSION, the returned
signature does not contain the full keyid. As a workaround, we export the
public key bundle identified by the short keyid to compute the full keyid
and add it to the returned signature.
<Arguments>
content:
The content to be signed. (bytes)
keyid: (optional)
The keyid of the gpg signing key. If not passed, the default
key in the keyring is used.
homedir: (optional)
Path to the gpg keyring. If not passed, the default keyring is used.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the keyid was passed and does not match
securesystemslib.formats.KEYID_SCHEMA
ValueError:
If the gpg command failed to create a valid signature.
OSError:
If the gpg command is not present or non-executable.
securesystemslib.exceptions.UnsupportedLibraryError:
If the gpg command is not available, or
the cryptography library is not installed.
securesystemslib.gpg.exceptions.CommandError:
If the gpg command returned a non-zero exit code.
securesystemslib.gpg.exceptions.KeyNotFoundError:
If the used gpg version is not fully supported
and no public key can be found for the short keyid.
<Side Effects>
None.
<Returns>
The created signature in the format:
securesystemslib.formats.GPG_SIGNATURE_SCHEMA.
"""
if not HAVE_GPG: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_GPG_MSG)
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
keyarg = ""
if keyid:
formats.KEYID_SCHEMA.check_match(keyid)
keyarg = "--local-user {}".format(keyid)
homearg = ""
if homedir:
homearg = "--homedir {}".format(homedir).replace("\\", "/")
command = GPG_SIGN_COMMAND.format(keyarg=keyarg, homearg=homearg)
gpg_process = process.run(command, input=content, check=False,
stdout=process.PIPE, stderr=process.PIPE)
# TODO: It's suggested to take a look at `--status-fd` for proper error
# reporting, as there is no clear distinction between the return codes
# https://lists.gnupg.org/pipermail/gnupg-devel/2005-December/022559.html
if gpg_process.returncode != 0:
raise CommandError("Command '{}' returned "
"non-zero exit status '{}', stderr was:\n{}.".format(gpg_process.args,
gpg_process.returncode, gpg_process.stderr.decode()))
signature_data = gpg_process.stdout
signature = parse_signature_packet(signature_data)
# On GPG < 2.1 we cannot derive the full keyid from the signature data.
# Instead we try to compute the keyid from the public part of the signing
# key or its subkeys, identified by the short keyid.
# parse_signature_packet is guaranteed to return at least one of keyid or
# short_keyid.
# Exclude the following code from coverage for consistent coverage across
# test environments.
if not signature["keyid"]: # pragma: no cover
log.warning("The created signature does not include the hashed subpacket"
" '33' (full keyid). You probably have a gpg version <{}."
" We will export the public keys associated with the short keyid to"
" compute the full keyid.".format(FULLY_SUPPORTED_MIN_VERSION))
short_keyid = signature["short_keyid"]
# Export the public key bundle (master key with optional subkeys)
public_key_bundle = export_pubkey(short_keyid, homedir)
# Test if the short keyid matches the master key ...
master_key_full_keyid = public_key_bundle["keyid"]
if master_key_full_keyid.endswith(short_keyid.lower()):
signature["keyid"] = master_key_full_keyid
# ... or one of the subkeys, and add the full keyid to the signature dict.
else:
for sub_key_full_keyid in list(
public_key_bundle.get("subkeys", {}).keys()):
if sub_key_full_keyid.endswith(short_keyid.lower()):
signature["keyid"] = sub_key_full_keyid
break
# If there is still no full keyid something went wrong
if not signature["keyid"]: # pragma: no cover
raise ValueError("Full keyid could not be determined for signature '{}'".
format(signature))
# It is okay now to remove the optional short keyid to save space
signature.pop("short_keyid", None)
return signature
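# Illustrative usage sketch for create_signature() (comment only; the keyid
# and homedir below are hypothetical placeholders and assume such a key
# exists in that keyring):
#
#   sig = create_signature(b"some payload",
#       keyid="<full keyid>", homedir="/path/to/.gnupg")
#   # 'sig' matches securesystemslib.formats.GPG_SIGNATURE_SCHEMA and always
#   # carries the full "keyid", even on older gpg versions (see above).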
def verify_signature(signature_object, pubkey_info, content):
"""
<Purpose>
Verifies the passed signature against the passed content using the
passed public key, or one of its subkeys, associated by the signature's
keyid.
The function selects the appropriate verification algorithm (rsa or dsa)
based on the "type" field in the passed public key object.
<Arguments>
signature_object:
A signature object in the format:
securesystemslib.formats.GPG_SIGNATURE_SCHEMA
pubkey_info:
A public key object in the format:
securesystemslib.formats.GPG_PUBKEY_SCHEMA
content:
The content to be verified. (bytes)
<Exceptions>
securesystemslib.gpg.exceptions.KeyExpirationError:
if the passed public key has expired
securesystemslib.exceptions.UnsupportedLibraryError:
if the cryptography module is unavailable
<Side Effects>
None.
<Returns>
True if signature verification passes, False otherwise.
"""
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
formats.GPG_PUBKEY_SCHEMA.check_match(pubkey_info)
formats.GPG_SIGNATURE_SCHEMA.check_match(signature_object)
handler = SIGNATURE_HANDLERS[pubkey_info['type']]
sig_keyid = signature_object["keyid"]
verification_key = pubkey_info
# If the keyid on the signature matches a subkey of the passed key,
# we use that subkey for verification instead of the master key.
if sig_keyid in list(pubkey_info.get("subkeys", {}).keys()):
verification_key = pubkey_info["subkeys"][sig_keyid]
creation_time = verification_key.get("creation_time")
validity_period = verification_key.get("validity_period")
if creation_time and validity_period and \
creation_time + validity_period < time.time():
raise KeyExpirationError(verification_key)
return handler.verify_signature(
signature_object, verification_key, content, SHA256)
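# Illustrative end-to-end sketch (sign, export, verify) -- comment only,
# assuming the default keyring holds a usable signing key:
#
#   content = b"some payload"
#   sig = create_signature(content)
#   pubkey = export_pubkey(sig["keyid"])
#   assert verify_signature(sig, pubkey, content)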
def export_pubkey(keyid, homedir=None):
"""Exports a public key from a GnuPG keyring.
Arguments:
keyid: An OpenPGP keyid in KEYID_SCHEMA format.
homedir (optional): A path to the GnuPG home directory. If not set the
default GnuPG home directory is used.
Raises:
ValueError: Keyid is not a string.
UnsupportedLibraryError: The gpg command or pyca/cryptography are not
available.
KeyNotFoundError: No key or subkey was found for that keyid.
Side Effects:
Calls system gpg command in a subprocess.
Returns:
An OpenPGP public key object in GPG_PUBKEY_SCHEMA format.
"""
if not HAVE_GPG: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_GPG_MSG)
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
if not formats.KEYID_SCHEMA.matches(keyid):
# FIXME: probably needs smarter parsing of what a valid keyid is so as to
# not export more than one pubkey packet.
raise ValueError("we need to export an individual key. Please provide a "
" valid keyid! Keyid was '{}'.".format(keyid))
homearg = ""
if homedir:
homearg = "--homedir {}".format(homedir).replace("\\", "/")
# TODO: Consider adopting command error handling from `create_signature`
# above, e.g. in a common 'run gpg command' utility function
command = GPG_EXPORT_PUBKEY_COMMAND.format(keyid=keyid, homearg=homearg)
gpg_process = process.run(command, stdout=process.PIPE, stderr=process.PIPE)
key_packet = gpg_process.stdout
key_bundle = get_pubkey_bundle(key_packet, keyid)
return key_bundle
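# Illustrative note (hypothetical keyid; comment only): the returned bundle
# carries the master key's full keyid and, if present, its subkeys keyed by
# their own full keyids, which is what verify_signature() consults above.
#
#   bundle = export_pubkey("<full keyid>")
#   bundle["keyid"]              # full keyid of the master key
#   bundle.get("subkeys", {})    # {full subkey keyid: subkey object}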
def export_pubkeys(keyids, homedir=None):
"""Exports multiple public keys from a GnuPG keyring.
Arguments:
keyids: A list of OpenPGP keyids in KEYID_SCHEMA format.
homedir (optional): A path to the GnuPG home directory. If not set the
default GnuPG home directory is used.
Raises:
TypeError: Keyids is not iterable.
ValueError: A Keyid is not a string.
UnsupportedLibraryError: The gpg command or pyca/cryptography are not
available.
KeyNotFoundError: No key or subkey was found for that keyid.
Side Effects:
Calls system gpg command in a subprocess.
Returns:
A dict of OpenPGP public key objects in GPG_PUBKEY_SCHEMA format as values,
and their keyids as dict keys.
"""
public_key_dict = {}
for gpg_keyid in keyids:
public_key = export_pubkey(gpg_keyid, homedir=homedir)
keyid = public_key["keyid"]
public_key_dict[keyid] = public_key
return public_key_dict
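# Illustrative usage sketch (hypothetical keyids, assuming both are present
# in the keyring at the given homedir; comment only):
#
#   bundles = export_pubkeys(["<keyid one>", "<keyid two>"],
#       homedir="/path/to/.gnupg")
#   for keyid, bundle in bundles.items():
#       print(keyid, bundle["type"])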