gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import logging
import re
import time
from typing import List
from requests.exceptions import ConnectTimeout
from requests.exceptions import ConnectionError
from django.db import transaction
from tkapi import TKApi
from tkapi.util import queries
from tkapi.persoon import Persoon as TKPersoon
from tkapi.dossier import Dossier as TKDossier
from tkapi.document import Document as TKDocument
from tkapi.besluit import Besluit as TKBesluit
from tkapi.zaak import Zaak
from tkapi.zaak import ZaakSoort
from tkapi.activiteit import ActiviteitStatus
import scraper.documents
from document.create import get_dossier_ids, DossierId
from document.models import CategoryDossier
from document.models import Dossier
from document.models import Kamerstuk
from openkamer.document import DocumentFactory
from openkamer.document import DocumentData
from openkamer.document import get_categories
from openkamer.decision import create_dossier_decisions
from openkamer.kamerstuk import create_kamerstuk
from openkamer.voting import VotingFactory
logger = logging.getLogger(__name__)
def create_dossier_retry_on_error(dossier_id, max_tries=3):
    """Create or update a dossier, retrying on transient network errors.

    :param dossier_id: dossier id (coerced to str for logging/lookup)
    :param max_tries: maximum number of attempts before giving up
    """
    dossier_id = str(dossier_id)
    tries = 0
    while True:
        try:
            tries += 1
            create_or_update_dossier(dossier_id)
            # Bug fix: without this break, a *successful* call fell through to
            # the next loop iteration and re-created the dossier forever.
            break
        except (ConnectionError, ConnectTimeout) as error:
            logger.exception(error)
            time.sleep(5)  # wait 5 seconds for external servers to relax
            if tries < max_tries:
                logger.error('trying again!')
                continue
            logger.error('max tries reached, skipping dossier: ' + dossier_id)
            break
@transaction.atomic
def create_or_update_dossier(dossier_id):
    """Delete and recreate the Dossier with the given id, including its
    documents, decisions and votings, then recompute derived fields.

    Runs in a single DB transaction, so a failure rolls the delete back.
    Returns the newly created Dossier instance.
    """
    logger.info('BEGIN - dossier id: {}'.format(dossier_id))
    # Recreate from scratch: any existing dossier with this id is removed first.
    Dossier.objects.filter(dossier_id=dossier_id).delete()
    dossier_url = 'https://zoek.officielebekendmakingen.nl/dossier/{}'.format(dossier_id)
    dossier_id_main, dossier_id_sub = Dossier.split_dossier_id(dossier_id)
    dossier_filter = TKDossier.create_filter()
    dossier_filter.filter_nummer(dossier_id_main)
    if dossier_id_sub:
        dossier_filter.filter_toevoeging(dossier_id_sub)
    dossiers = TKApi.get_dossiers(filter=dossier_filter)
    if len(dossiers) != 1:
        # NOTE(review): this only logs — if the API returns zero dossiers the
        # next line raises IndexError; confirm that is the intended failure mode.
        logger.error('{} dossiers found while one expected for {}'.format(len(dossiers), dossier_id))
    tk_dossier = dossiers[0]
    # TODO BR: create a list of related dossier decisions instead of one, see dossier 34792 for example
    logger.info('dossier id main: {} | dossier id sub: {}'.format(dossier_id_main, dossier_id_sub))
    # Prefer the most recent besluit that has votings; fall back to the most
    # recent besluit overall, and finally to the placeholder text 'Onbekend'.
    last_besluit = get_besluit_last_with_voting(dossier_id_main, dossier_id_sub)
    if not last_besluit:
        last_besluit = get_besluit_last(dossier_id_main, dossier_id_sub)
    decision_text = 'Onbekend'
    if last_besluit:
        decision_text = last_besluit.tekst.replace('.', '')
    dossier_new = Dossier.objects.create(
        dossier_id=dossier_id,
        dossier_main_id=dossier_id_main,
        dossier_sub_id=dossier_id_sub,
        title=tk_dossier.titel,
        url=dossier_url,
        decision_text=decision_text
    )
    create_dossier_documents(dossier_new, dossier_id)
    create_dossier_decisions(dossier_id_main, dossier_id_sub, dossier_new)
    voting_factory = VotingFactory()
    voting_factory.create_votings(dossier_id)
    dossier_new.set_derived_fields()
    logger.info('END - dossier id: ' + str(dossier_id))
    return dossier_new
def get_document_data(tk_document: TKDocument, tk_zaak: Zaak, dossier_id):
    """Collect metadata and HTML content for one document of a dossier.

    :param tk_document: the tkapi Document to fetch content for
    :param tk_zaak: the tkapi Zaak the document belongs to
    :param dossier_id: dossier id used to build the overheid.nl document id
    :returns: a DocumentData bundle (content_html is '' when scraping fails)
    """
    dossier_id = re.sub(r'-\(.*\)', '', dossier_id)  # Rijkswet ID is not used in url
    overheid_document_id = 'kst-{}-{}'.format(dossier_id, tk_document.volgnummer)
    metadata = scraper.documents.get_metadata(overheid_document_id)
    try:
        content_html = scraper.documents.get_html_content(overheid_document_id)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Scrape failures are deliberately non-fatal.
        logger.exception('error getting document html for document id: {}'.format(overheid_document_id))
        content_html = ''
    document_data = DocumentData(
        document_id=overheid_document_id,
        tk_document=tk_document,
        tk_zaak=tk_zaak,
        metadata=metadata,
        content_html=content_html,
    )
    return document_data
@transaction.atomic
def create_dossier_documents(dossier, dossier_id):
    """Create Document and Kamerstuk records for every document in the dossier.

    :param dossier: the freshly created Dossier model instance
    :param dossier_id: the full dossier id string (main + optional sub)
    """
    logger.info('create_dossier_documents - BEGIN')
    tk_dossier = queries.get_dossier(nummer=dossier.dossier_main_id, toevoeging=dossier.dossier_sub_id)
    # First gather all document data (network-bound), then create DB records.
    outputs = []
    for tk_zaak in tk_dossier.zaken:
        for doc in tk_zaak.documenten:
            if int(doc.volgnummer) == -1:
                # TODO BR: this document is not found at overheid.nl, fix this
                continue
            outputs.append(get_document_data(doc, tk_zaak, dossier_id))
    logger.info('create_dossier_documents - outputs: {}'.format(len(outputs)))
    for data in outputs:
        properties = {
            'dossier': dossier,
            'title_full': data.tk_document.onderwerp,
            'title_short': data.tk_document.onderwerp,
            'publication_type': data.tk_document.soort.value,
            'date_published': data.tk_document.datum,
            'source_url': data.url,
            'content_html': data.content_html,
        }
        document = DocumentFactory.create_or_update_document(data, properties)
        # Only create a kamerstuk if one does not already exist for this number.
        if not Kamerstuk.objects.filter(id_main=dossier_id, id_sub=data.tk_document.volgnummer).exists():
            create_kamerstuk(
                document=document,
                dossier_id=dossier_id,
                number=data.tk_document.volgnummer,
                type_long=data.tk_document.onderwerp,
                type_short=data.tk_document.soort.value
            )
        # Categories are pipe-separated in the scraped metadata.
        category_list = get_categories(text=data.category, category_class=CategoryDossier, sep_char='|')
        dossier.categories.add(*category_list)
def get_inactive_dossier_ids(year=None) -> List[DossierId]:
    """Return DossierIds of all dossiers whose status marks them inactive.

    :param year: optional year; when given, only dossiers whose start_date
        falls in that year are returned
    """
    dossiers = Dossier.objects.filter(status__in=[
        Dossier.VERWORPEN, Dossier.AANGENOMEN, Dossier.INGETROKKEN, Dossier.CONTROVERSIEEL
    ])
    if year is not None:
        # Filter in the database instead of issuing one query per dossier
        # (the original fetched each Dossier individually: an N+1 query).
        # `start_date__year` matches only rows with a non-null start_date,
        # mirroring the old `dossier.start_date and ...` check.
        dossiers = dossiers.filter(start_date__year=int(year))
    dossier_ids = dossiers.values_list('dossier_id', flat=True)
    return [DossierId(*Dossier.split_dossier_id(dossier_id)) for dossier_id in dossier_ids]
def create_wetsvoorstellen_active(skip_existing=False, max_tries=3):
    """Create wetsvoorstellen for all dossiers that are not yet inactive.

    :returns: list of dossier ids that failed to be created
    """
    logger.info('BEGIN')
    dossiers = get_dossier_ids()
    logger.info('active dossiers found: {}'.format(len(dossiers)))
    # Use a set for O(1) membership tests; the original scanned a list for
    # every dossier, which is O(n*m).
    dossier_ids_inactive = {str(dossier_id) for dossier_id in get_inactive_dossier_ids()}
    dossier_ids_active = [dossier for dossier in dossiers if str(dossier) not in dossier_ids_inactive]
    # Process newest first.
    dossier_ids_active.reverse()
    logger.info('dossiers active: {}'.format(dossier_ids_active))
    failed_dossiers = create_wetsvoorstellen(dossier_ids_active, skip_existing=skip_existing, max_tries=max_tries)
    logger.info('END')
    return failed_dossiers
def create_wetsvoorstellen_inactive(year=None, skip_existing=False, max_tries=3):
    """Create wetsvoorstellen for inactive dossiers, optionally for one year.

    :returns: list of dossier ids that failed to be created
    """
    logger.info('BEGIN - year: {}'.format(year))
    inactive_ids = get_inactive_dossier_ids(year=year)
    inactive_ids.reverse()  # process newest first
    logger.info('inactive dossiers found: {}'.format(len(inactive_ids)))
    failed = create_wetsvoorstellen(inactive_ids, skip_existing=skip_existing, max_tries=max_tries)
    logger.info('END')
    return failed
def create_wetsvoorstellen_all(skip_existing=False, max_tries=3):
    """Create wetsvoorstellen for every known dossier, newest first.

    :returns: list of dossier ids that failed to be created
    """
    logger.info('BEGIN')
    all_ids = get_dossier_ids()
    all_ids.reverse()
    failed = create_wetsvoorstellen(all_ids, skip_existing=skip_existing, max_tries=max_tries)
    logger.info('END')
    return failed
def create_wetsvoorstellen(dossier_ids: List[DossierId], skip_existing=False, max_tries=3):
    """Create a wetsvoorstel (dossier) for each id, collecting failures.

    :param dossier_ids: DossierId tuples (main id + optional sub id)
    :param skip_existing: when True, dossiers already in the DB are skipped
    :param max_tries: retry budget passed to create_dossier_retry_on_error
    :returns: list of full dossier ids that raised during creation
    """
    logger.info('BEGIN')
    failed = []
    for entry in dossier_ids:
        full_id = Dossier.create_dossier_id(entry.dossier_id, entry.dossier_sub_id)
        logger.info('dossier id: {}'.format(full_id))
        if skip_existing and Dossier.objects.filter(dossier_id=full_id).exists():
            logger.info('dossier already exists, skip')
            continue
        try:
            create_dossier_retry_on_error(dossier_id=full_id, max_tries=max_tries)
        except Exception:
            failed.append(full_id)
            logger.exception('error for dossier id: ' + str(full_id))
    logger.info('END')
    return failed
def get_tk_besluiten_dossier_main(dossier_id_main, dossier_id_sub=None) -> List[TKBesluit]:
    """Return besluiten that belong to the dossier itself.

    Kamerstuk besluiten (motie, amendement, etc.) carry a non-zero zaak
    volgnummer and are filtered out; only volgnummer '0' is the main dossier.
    """
    tk_besluiten = queries.get_dossier_besluiten(nummer=dossier_id_main, toevoeging=dossier_id_sub)
    return [besluit for besluit in tk_besluiten if str(besluit.zaak.volgnummer) == '0']
def get_besluit_last(dossier_id_main, dossier_id_sub=None, filter_has_votings=False) -> TKBesluit:
    """Return the most recent non-planned besluit of the main dossier.

    Recency is determined by the begin time of the besluit's agendapunt
    activiteit. Besluiten whose activiteit is still GEPLAND (planned) are
    skipped. Returns None when no besluit qualifies.

    :param filter_has_votings: when True, only besluiten with stemmingen count
    """
    tk_besluiten = get_tk_besluiten_dossier_main(dossier_id_main=dossier_id_main, dossier_id_sub=dossier_id_sub)
    last_besluit = None
    for tk_besluit in tk_besluiten:
        if filter_has_votings and not tk_besluit.stemmingen:
            continue
        if tk_besluit.agendapunt.activiteit.status == ActiviteitStatus.GEPLAND:
            # TODO: create dossier agendapunt with planned activiteit
            continue
        # Keep the besluit with the latest activiteit begin time.
        if last_besluit is None or tk_besluit.agendapunt.activiteit.begin > last_besluit.agendapunt.activiteit.begin:
            last_besluit = tk_besluit
    return last_besluit
def get_besluit_last_with_voting(dossier_id_main, dossier_id_sub=None) -> TKBesluit:
    """Return the most recent dossier besluit that has votings, or None."""
    return get_besluit_last(
        dossier_id_main=dossier_id_main,
        dossier_id_sub=dossier_id_sub,
        filter_has_votings=True,
    )
def get_zaken_dossier_main(dossier_id_main, dossier_id_sub=None) -> List[Zaak]:
    """Return the main zaken of a dossier, trying zaak types in order.

    The API cannot OR zaak soorten, so each soort is queried in turn and the
    first non-empty result wins: WETGEVING, then INITIATIEF_WETGEVING, then
    BEGROTING. Returns an empty list when none match.
    """
    # TODO BR: filter by Wetgeving OR Initiatiefwetgeving if tkapi makes it possible
    # Refactor: the original repeated the same filter-and-query three times;
    # it also shadowed the builtin `filter` with a local name.
    for soort in (ZaakSoort.WETGEVING, ZaakSoort.INITIATIEF_WETGEVING, ZaakSoort.BEGROTING):
        zaak_filter = Zaak.create_filter()
        zaak_filter.filter_kamerstukdossier(nummer=dossier_id_main, toevoeging=dossier_id_sub)
        zaak_filter.filter_soort(soort)
        zaken = TKApi.get_zaken(filter=zaak_filter)
        if zaken:
            return zaken
    return zaken
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VGG16 benchmark in TensorFlow"""
import tensorflow as tf
import numpy as np
import argparse
import time
# Command-line configuration for the VGG16 benchmark run.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    '--batch_size', type=int, default=128, help="Batch size for training.")
parser.add_argument(
    '--skip_batch_num',
    type=int,
    default=5,
    help='The first num of minibatch num to skip, for better performance test')
parser.add_argument(
    '--iterations', type=int, default=80, help='The number of minibatches.')
parser.add_argument(
    '--learning_rate',
    type=float,
    default=1e-3,
    help="Learning rate for training.")
parser.add_argument('--num_passes', type=int, default=50, help="No. of passes.")
parser.add_argument(
    '--device',
    type=str,
    default='GPU',
    choices=['CPU', 'GPU'],
    help="The device type.")
parser.add_argument(
    '--data_format',
    type=str,
    default='NHWC',
    choices=['NCHW', 'NHWC'],
    help='The data order, NCHW=[batch, channels, height, width].'
    'Only support NHWC right now.')
parser.add_argument(
    '--data_set',
    type=str,
    default='cifar10',
    choices=['cifar10', 'flowers'],
    help='Optional dataset for benchmark.')
# Parsed once at import time; the rest of the module reads this global.
args = parser.parse_args()
class VGG16Model(object):
    """VGG16 network built with TF v1 ops (batch-norm + dropout variant)."""

    def __init__(self):
        self.parameters = []

    def batch_norm_relu(self, inputs, is_training):
        """Performs a batch normalization followed by a ReLU."""
        # We set fused=True for a significant speed boost. See
        # https://www.tensorflow.org/speed/speed_guide#common_fused_ops
        inputs = tf.layers.batch_normalization(
            inputs=inputs,
            axis=1 if args.data_format == 'NCHW' else -1,
            momentum=0.9,
            epsilon=1e-05,
            center=True,
            scale=True,
            training=is_training,
            fused=True)
        inputs = tf.nn.relu(inputs)
        return inputs

    def conv_bn_layer(self,
                      name,
                      images,
                      kernel_shape,
                      is_training,
                      drop_rate=0.0):
        """3x3 conv + bias + batch-norm/ReLU + dropout, scoped under `name`."""
        with tf.name_scope(name) as scope:
            kernel = tf.Variable(
                tf.truncated_normal(
                    kernel_shape, dtype=tf.float32, stddev=1e-1),
                name='weights')
            conv = tf.nn.conv2d(
                images,
                kernel, [1, 1, 1, 1],
                data_format=args.data_format,
                padding='SAME')
            biases = tf.Variable(
                tf.constant(
                    0.0, shape=[kernel_shape[-1]], dtype=tf.float32),
                trainable=True,
                name='biases')
            out = tf.nn.bias_add(conv, biases)
            out = self.batch_norm_relu(out, is_training)
            out = tf.layers.dropout(out, rate=drop_rate, training=is_training)
            return out

    def fc_layer(self, name, inputs, shape):
        """Fully connected layer (matmul + bias), scoped under `name`."""
        with tf.name_scope(name) as scope:
            fc_w = tf.Variable(
                tf.truncated_normal(
                    shape, dtype=tf.float32, stddev=1e-1),
                name='weights')
            fc_b = tf.Variable(
                tf.constant(
                    0.0, shape=[shape[-1]], dtype=tf.float32),
                trainable=True,
                name='biases')
            out = tf.nn.bias_add(tf.matmul(inputs, fc_w), fc_b)
            return out

    def network(self, images, class_dim, is_training):
        """ VGG16 model structure.

        TODO(kuke): enable this network to support the 'NCHW' data format

        :param images: input image batch placeholder
        :param class_dim: number of output classes
        :param is_training: bool placeholder toggling dropout/batch-norm
        :returns: unscaled logits of shape [batch, class_dim]
        """
        # conv1
        conv1_1 = self.conv_bn_layer(
            'conv1_1', images, [3, 3, 3, 64], is_training, drop_rate=0.3)
        conv1_2 = self.conv_bn_layer(
            'conv1_2', conv1_1, [3, 3, 64, 64], is_training, drop_rate=0.0)
        # pool1
        pool1 = tf.nn.max_pool(
            conv1_2,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding='SAME',
            name='pool1')
        # conv2
        conv2_1 = self.conv_bn_layer(
            'conv2_1', pool1, [3, 3, 64, 128], is_training, drop_rate=0.4)
        conv2_2 = self.conv_bn_layer(
            'conv2_2', conv2_1, [3, 3, 128, 128], is_training, drop_rate=0.0)
        # pool2
        pool2 = tf.nn.max_pool(
            conv2_2,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding='SAME',
            name='pool2')
        # conv3
        conv3_1 = self.conv_bn_layer(
            'conv3_1', pool2, [3, 3, 128, 256], is_training, drop_rate=0.4)
        conv3_2 = self.conv_bn_layer(
            'conv3_2', conv3_1, [3, 3, 256, 256], is_training, drop_rate=0.4)
        conv3_3 = self.conv_bn_layer(
            'conv3_3', conv3_2, [3, 3, 256, 256], is_training, drop_rate=0.0)
        # pool3
        pool3 = tf.nn.max_pool(
            conv3_3,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding='SAME',
            name='pool3')
        # conv4
        conv4_1 = self.conv_bn_layer(
            'conv4_1', pool3, [3, 3, 256, 512], is_training, drop_rate=0.4)
        conv4_2 = self.conv_bn_layer(
            'conv4_2', conv4_1, [3, 3, 512, 512], is_training, drop_rate=0.4)
        conv4_3 = self.conv_bn_layer(
            'conv4_3', conv4_2, [3, 3, 512, 512], is_training, drop_rate=0.0)
        # pool4
        pool4 = tf.nn.max_pool(
            conv4_3,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding='SAME',
            name='pool4')
        # conv5
        conv5_1 = self.conv_bn_layer(
            'conv5_1', pool4, [3, 3, 512, 512], is_training, drop_rate=0.4)
        conv5_2 = self.conv_bn_layer(
            'conv5_2', conv5_1, [3, 3, 512, 512], is_training, drop_rate=0.4)
        conv5_3 = self.conv_bn_layer(
            'conv5_3', conv5_2, [3, 3, 512, 512], is_training, drop_rate=0.0)
        # pool5
        pool5 = tf.nn.max_pool(
            conv5_3,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding='SAME',
            # Bug fix: the op was named 'pool4' (copy-paste), colliding with
            # the previous pooling layer's name in the graph.
            name='pool5')
        # flatten
        shape = int(np.prod(pool5.get_shape()[1:]))
        pool5_flat = tf.reshape(pool5, [-1, shape])
        # fc1
        drop = tf.layers.dropout(pool5_flat, rate=0.5, training=is_training)
        fc1 = self.fc_layer('fc1', drop, [shape, 512])
        # fc2
        bn = self.batch_norm_relu(fc1, is_training)
        drop = tf.layers.dropout(bn, rate=0.5, training=is_training)
        fc2 = self.fc_layer('fc2', drop, [512, 512])
        fc3 = self.fc_layer('fc3', fc2, [512, class_dim])
        return fc3
def run_benchmark():
    """Run benchmark on cifar10 or flowers.

    Builds the VGG16 graph, trains for `args.num_passes` passes (capped at
    `args.iterations` minibatches) and prints per-pass training speed and
    test accuracy.

    NOTE(review): this function uses `paddle.batch` / `paddle.dataset.*` and
    `paddle.reader.shuffle`, but the module never imports `paddle` — running
    this raises NameError until `import paddle` (v2 API) is added. Confirm
    against the original benchmark script.
    NOTE(review): `map(...)` fed directly to `np.array(...)` only works on
    Python 2, where `map` returns a list — this file appears to be py2 code.
    """
    if args.data_set == "cifar10":
        class_dim = 10
        raw_shape = (3, 32, 32)
        dat_shape = (None, 32, 32, 3) if args.data_format == 'NHWC' else (
            None, 3, 32, 32)
    else:
        class_dim = 102
        raw_shape = (3, 224, 224)
        dat_shape = (None, 224, 224, 3) if args.data_format == 'NHWC' else (
            None, 3, 224, 224)
    device = '/cpu:0' if args.device == 'CPU' else '/device:GPU:0'
    with tf.device(device):
        images = tf.placeholder(tf.float32, shape=dat_shape)
        labels = tf.placeholder(tf.int64, shape=(None, ))
        is_training = tf.placeholder('bool')
        onehot_labels = tf.one_hot(labels, depth=class_dim)
        vgg16 = VGG16Model()
        logits = vgg16.network(images, class_dim, is_training)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=onehot_labels, logits=logits)
        avg_loss = tf.reduce_mean(loss)
        correct = tf.equal(tf.argmax(logits, 1), labels)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
        # Batch-norm moving averages must update alongside the train op.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(avg_loss)
        # data reader
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.cifar.train10()
                if args.data_set == 'cifar10' else paddle.dataset.flowers.train(),
                buf_size=5120),
            batch_size=args.batch_size)
        test_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.cifar.test10()
                if args.data_set == 'cifar10' else paddle.dataset.flowers.test(),
                buf_size=5120),
            batch_size=args.batch_size)

        # test
        def test():
            # Mean accuracy over the whole test set (closure over the graph).
            test_accs = []
            for batch_id, data in enumerate(test_reader()):
                test_images = np.array(
                    map(lambda x: np.transpose(x[0].reshape(raw_shape),
                        axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32")
                test_labels = np.array(map(lambda x: x[1], data)).astype('int64')
                test_accs.append(
                    accuracy.eval(feed_dict={
                        images: test_images,
                        labels: test_labels,
                        is_training: False
                    }))
            return np.mean(test_accs)

        config = tf.ConfigProto(
            intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            init_g = tf.global_variables_initializer()
            init_l = tf.local_variables_initializer()
            sess.run(init_g)
            sess.run(init_l)
            iters, num_samples, start_time = 0, 0, time.time()
            for pass_id in range(args.num_passes):
                # train
                num_samples = 0
                start_time = time.time()
                for batch_id, data in enumerate(train_reader()):
                    # Restart the clock after the warm-up batches so startup
                    # cost does not skew the measured throughput.
                    if iters == args.skip_batch_num:
                        start_time = time.time()
                        num_samples = 0
                    if iters == args.iterations:
                        break
                    train_images = np.array(
                        map(lambda x: np.transpose(x[0].reshape(raw_shape),
                            axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32")
                    train_labels = np.array(map(lambda x: x[1], data)).astype(
                        'int64')
                    # NOTE(review): the local name `loss` here shadows the
                    # graph tensor `loss` defined above; harmless only because
                    # the tensor is not referenced again afterwards.
                    _, loss, acc = sess.run([train_op, avg_loss, accuracy],
                                            feed_dict={
                                                images: train_images,
                                                labels: train_labels,
                                                is_training: True
                                            })
                    iters += 1
                    num_samples += len(data)
                    print("Pass = %d, Iters = %d, Loss = %f, Accuracy = %f" %
                          (pass_id, iters, loss, acc))
                train_elapsed = time.time() - start_time
                # test
                pass_test_acc = test()
                print("Pass = %d, Train speed = %f imgs/s, Test accuracy = %f\n" %
                      (pass_id, num_samples / train_elapsed, pass_test_acc))
def print_arguments():
    """Print all parsed command-line arguments, sorted by name."""
    print('----------- Configuration Arguments -----------')
    # Bug fix: dict.iteritems() does not exist on Python 3; items() behaves
    # the same on both Python 2 and 3 for this use.
    for arg, value in sorted(vars(args).items()):
        print('%s: %s' % (arg, value))
    print('------------------------------------------------')
# Script entry point: show the configuration, then run the benchmark.
if __name__ == '__main__':
    print_arguments()
    run_benchmark()
| |
"""
For Handling db calls on Ideas
"""
import json
import datetime
from lib.model import (User, Organization, MiniOrganization, IdeaMeta, IdeaVersion,
MiniIdea, Comment, Reply)
from mongoengine import Q
def create_idea(creator, org_id, **kwargs):
    """Create a new idea owned by *creator* inside organization *org_id*.

    The idea's minified copy is embedded in the organization's idea list.
    Returns the saved idea as a plain dict (parsed from its JSON form).
    """
    # Get my creator's object and my org
    my_owner = User.objects.get(google_id=creator)
    my_org = Organization.objects.get(unique=org_id)
    # Create object and set mini + creator + followers
    new_idea = IdeaMeta(**kwargs)
    new_idea.minified = MiniIdea(**kwargs)
    new_idea.created_by = my_owner.google_id
    new_idea.followers = [my_owner.google_id]  # creator follows their own idea
    new_idea.my_org = my_org.minified
    new_idea.created_on = datetime.datetime.now()
    new_idea.num_comments = 0
    new_idea.save()
    # Must save the idea first so the minified copy appended below is complete.
    my_org.ideas.append(new_idea.minified)
    my_org.save()
    return json.loads(new_idea.to_json())
def create_version(creator, idea_id, **kwargs):
    """Append a new IdeaVersion to the idea and return it as a dict."""
    author = User.objects.get(google_id=creator)
    version = IdeaVersion(**kwargs)
    version.thinker = author.google_id
    version.created_on = datetime.datetime.now()
    idea = IdeaMeta.objects.get(unique=idea_id)
    idea.update(push__versions=version)
    return json.loads(version.to_json())
def get_all_ideas(org_id):
    """Return every idea embedded in the organization as a list of dicts."""
    org = Organization.objects.get(unique=org_id)
    return [json.loads(idea.to_json()) for idea in org.ideas]
def match_ideas(org_id, search_string):
    """Search ideas in one organization by title or short description.

    Returns at most 10 matches as plain dicts.

    Bug fix: the original combined Q objects with the Python ``and`` operator,
    which evaluates to the right-hand operand only — the organization filter
    was silently dropped and results leaked across organizations. Q objects
    must be combined with ``&`` / ``|``.
    """
    matches = IdeaMeta.objects(
        Q(my_org__unique__exact=org_id)
        & (Q(title__icontains=search_string) | Q(short_description__icontains=search_string))
    )[:10]
    return [json.loads(idea.to_json()) for idea in matches]
def delete_idea(user_id, org_id, idea_id):
    """Delete an idea and detach it from its organization.

    Returns the deleted idea's last state as a dict.
    """
    idea = IdeaMeta.objects.get(unique=idea_id)
    org = Organization.objects.get(unique=org_id)
    org.update(pull__ideas__unique=idea_id)
    idea.delete()
    return json.loads(idea.to_json())
def update_idea(idea_id, **kwargs):
    """Update the editable fields of an idea and propagate the change to
    every organization that embeds its minified copy.

    Only 'title' and 'short_description' are applied from kwargs.
    Returns the updated idea as a dict.
    """
    idea_keys = ['title', 'short_description']
    my_idea = IdeaMeta.objects.get(unique=idea_id)
    for k in idea_keys:
        if k in kwargs.keys():
            # Keep the embedded minified copy in sync with the full record.
            my_idea.minified[k] = kwargs[k]
            my_idea[k] = kwargs[k]
    # Remember to do same for projects in the future!!
    # Replace the stale minified copy in every organization that embeds it.
    my_orgs = Organization.objects(ideas__unique=idea_id)
    for an_org in my_orgs:
        an_org.update(pull__ideas__unique=idea_id)
        an_org.update(push__ideas=my_idea.minified)
    my_idea.last_edit = datetime.datetime.now()
    my_idea.save()
    ##Need to update mini
    return json.loads(my_idea.to_json())
def get_idea(idea_id):
    """Fetch a single idea by its unique id and return it as a dict."""
    return json.loads(IdeaMeta.objects.get(unique=idea_id).to_json())
def add_follower(user_id, idea_id):
    """Add a user to the idea's followers (idempotent) and return the user."""
    follower = User.objects.get(google_id=user_id)
    idea = IdeaMeta.objects.get(unique=idea_id)
    # add_to_set avoids duplicate follower entries.
    idea.update(add_to_set__followers=follower.google_id)
    return follower
def remove_follower(user_id, idea_id):
    """Remove a user from the idea's followers; returns the user id."""
    idea = IdeaMeta.objects.get(unique=idea_id)
    idea.update(pull__followers=user_id)
    return user_id
def update_version(idea_id, version_id, **kwargs):
    """Update the editable fields ('text') of one version of an idea.

    Returns the full idea as a dict after saving.
    Raises ValueError when the version id does not exist on the idea.
    """
    idea_keys = ['text']
    my_idea = IdeaMeta.objects.get(unique=idea_id)
    # Bug fix: the original left `my_version` unbound when no embedded version
    # matched, crashing with UnboundLocalError further down; fail explicitly.
    my_version = None
    for version in my_idea.versions:
        if version.unique == version_id:
            my_version = version
            break
    if my_version is None:
        raise ValueError('version not found: {}'.format(version_id))
    for k in idea_keys:
        if k in kwargs:
            my_version[k] = kwargs[k]
    my_version.last_edit = datetime.datetime.now()
    my_idea.save()
    return json.loads(my_idea.to_json())
def remove_version(idea_id, version_id):
    """Remove one embedded version from an idea; returns the idea document."""
    idea = IdeaMeta.objects.get(unique=idea_id)
    idea.update(pull__versions__unique=version_id)
    return idea
def change_karma(user_id, idea_id, version_id):
    """Record which version a user gives karma to; returns the karma map."""
    idea = IdeaMeta.objects.get(unique=idea_id)
    # One karma vote per user: the map overwrites any previous vote.
    idea.karma[user_id] = version_id
    idea.save()
    return idea.karma
def create_comment(user_id, idea_id, **kwargs):
    """Append a new comment to an idea and return it as a dict.

    The comment's index is the idea's current comment count; the counter is
    incremented atomically on the server via inc__num_comments.
    """
    my_idea = IdeaMeta.objects.get(unique=idea_id)
    my_comment = Comment(**kwargs)
    # Index is assigned from the locally loaded count *before* the increment;
    # concurrent callers could race here.
    my_comment.index = my_idea.num_comments
    my_idea.update(inc__num_comments=1)
    my_comment.num_replies = 0
    my_comment.time = datetime.datetime.now()
    my_comment.commenter = user_id
    my_idea.update(push__comments=my_comment)
    return json.loads(my_comment.to_json())
def update_comment(idea_id, **kwargs):
    """Update the text of the comment at kwargs['index']; returns it as a dict."""
    editable_keys = ['text']
    idea = IdeaMeta.objects.get(unique=idea_id)
    comment = idea.comments[kwargs['index']]
    for key in editable_keys:
        if key in kwargs:
            comment[key] = kwargs[key]
    comment.time = datetime.datetime.now()
    idea.save()
    return json.loads(comment.to_json())
def remove_comment(idea_id, comment_id):
    """Soft-delete a comment by replacing its text; returns it as a dict."""
    idea = IdeaMeta.objects.get(unique=idea_id)
    # The comment stays in place (indices are positional) — only its text goes.
    idea.comments[comment_id].text = "Comment removed by author."
    idea.save()
    return json.loads(idea.comments[comment_id].to_json())
def create_reply(user_id, idea_id, comment_id, **kwargs):
    """Append a reply to one comment of an idea and return it as a dict.

    The reply's index is the comment's current reply count.
    """
    my_idea = IdeaMeta.objects.get(unique=idea_id)
    # my_comment references the embedded document, so the num_replies bump
    # below is persisted by the final my_idea.save().
    my_comment = my_idea.comments[comment_id]
    my_reply = Reply(**kwargs)
    my_reply.index = my_comment.num_replies
    my_comment.num_replies += 1
    my_reply.time = datetime.datetime.now()
    my_reply.replier = user_id
    my_idea.comments[comment_id].replies.append(my_reply)
    my_idea.save()
    return json.loads(my_reply.to_json())
def update_reply(idea_id, comment_id, **kwargs):
    """Update the text of a reply at kwargs['index']; returns it as a dict."""
    editable_keys = ['text']
    idea = IdeaMeta.objects.get(unique=idea_id)
    reply = idea.comments[comment_id].replies[kwargs['index']]
    for key in editable_keys:
        if key in kwargs:
            reply[key] = kwargs[key]
    reply.time = datetime.datetime.now()
    idea.save()
    return json.loads(reply.to_json())
def remove_reply(idea_id, comment_id, reply_id):
    """Soft-delete a reply by replacing its text; returns it as a dict."""
    idea = IdeaMeta.objects.get(unique=idea_id)
    # Positional soft delete mirrors remove_comment: indices stay stable.
    idea.comments[comment_id].replies[reply_id].text = "Reply removed by author."
    idea.save()
    return json.loads(idea.comments[comment_id].replies[reply_id].to_json())
| |
from unittest import TestCase
from iota import Address
from iota.crypto.addresses import AddressGenerator
from iota.crypto.types import Seed
class AddressGeneratorTestCase(TestCase):
    """Unit tests for :py:class:`AddressGenerator`.

    All expected addresses are fixed test vectors derived from the two
    hard-coded (non-production) seeds below.
    """
    maxDiff = None

    def setUp(self):
        super(AddressGeneratorTestCase, self).setUp()
        # Two deterministic test seeds; never use these in production.
        self.seed_1 =\
            Seed(
                b'TESTVALUE9DONTUSEINPRODUCTION999999GFDDC'
                b'PFIIEHBCWFN9KHRBEIHHREFCKBVGUGEDXCFHDFPAL',
            )
        self.seed_2 =\
            Seed(
                b'TESTVALUE9DONTUSEINPRODUCTION99999DCZGVE'
                b'JIZEKEGEEHYE9DOHCHLHMGAFDGEEQFUDVGGDGHRDR',
            )

    def test_get_addresses_single(self):
        """
        Generating a single address.
        """
        ag = AddressGenerator(self.seed_1)
        self.assertListEqual(
            ag.get_addresses(start=0),
            [
                Address(
                    b'DLEIS9XU9V9T9OURAKDUSQWBQEYFGJLRPRVEWKN9'
                    b'SSUGIHBEIPBPEWISSAURGTQKWKWNHXGCBQTWNOGIY',
                ),
            ],
        )
        self.assertListEqual(
            ag.get_addresses(start=10),
            [
                Address(
                    b'XLXFTFBXUOOHRJDVBDBFEBDQDUKSLSOCLUYWGLAP'
                    b'R9FUROUHPFINIUFKYSRTFMNWKNEPDZATWXIVWJMDD',
                ),
            ],
        )

    def test_get_addresses_multiple(self):
        """
        Generating multiple addresses in one go.
        """
        ag = AddressGenerator(self.seed_2)
        self.assertListEqual(
            ag.get_addresses(start=0, count=3),
            [
                Address(
                    b'FNKCVJPUANHNWNBAHFBTCONMCUBC9KCZ9EKREBCJ'
                    b'AFMABCTEPLGGXDJXVGPXDCFOUCRBWFJFLEAVOEUPY',
                ),
                Address(
                    b'MSYILYYZLSJ99TDMGQHDOBWGHTBARCBGJZE9PIMQ'
                    b'LTEXJXKTDREGVTPA9NDGGLQHTMGISGRAKSLYPGWMB',
                ),
                Address(
                    b'IIREHGHXUHARKVZDMHGUUCHZLUEQQULLEUSJHIIB'
                    b'WFYZIZDUFTOVHAWCKRJXUZ9CSUVLTRYSUGBVRMTOW',
                ),
            ],
        )
        self.assertListEqual(
            ag.get_addresses(start=10, count=3),
            [
                Address(
                    b'BPXMVV9UPKBTVPJXPBHHOJYAFLALOYCGTSEDLZBH'
                    b'NFMGEHREBQTRIPZAPREANPMZJNZZNCDIUFOYYGGFY',
                ),
                Address(
                    b'RUCZQJWKXVDIXTLHHOKGMHOV9AKVDBG9HUQHPWNZ'
                    b'UNKJNFVMULUSLKFJGSTBSNJMRYSJOBVBQSKVXISZB',
                ),
                Address(
                    b'FQAKF9XVCLTBESJKWCHFOCTVABYEEJP9RXUVAEUW'
                    b'ENFUUQK9VCHFEORHCYDUJQHNUDWNRDUDZTUGKHSPD',
                ),
            ],
        )

    def test_get_addresses_error_start_too_small(self):
        """
        Providing a negative ``start`` value to ``get_addresses``.

        :py:class:`AddressGenerator` can potentially generate an infinite
        number of addresses, so there is no "end" to offset against.
        """
        ag = AddressGenerator(seed=b'')
        with self.assertRaises(ValueError):
            ag.get_addresses(start=-1)

    def test_get_addresses_error_count_too_small(self):
        """
        Providing a ``count`` value less than 1 to ``get_addresses``.

        :py:class:`AddressGenerator` can potentially generate an infinite
        number of addresses, so there is no "end" to offset against.
        """
        ag = AddressGenerator(seed=b'')
        with self.assertRaises(ValueError):
            ag.get_addresses(start=0, count=0)

    def test_get_addresses_error_step_zero(self):
        """
        Providing a ``step`` value of 0 to ``get_addresses``.
        """
        ag = AddressGenerator(seed=b'')
        with self.assertRaises(ValueError):
            ag.get_addresses(start=0, step=0)

    def test_get_addresses_step_negative(self):
        """
        Providing a negative ``step`` value to ``get_addresses``.

        This is probably a weird use case, but what the heck.
        """
        ag = AddressGenerator(self.seed_1)
        # step=-1 walks backwards: index 1 first, then index 0.
        self.assertListEqual(
            ag.get_addresses(start=1, count=2, step=-1),
            [
                Address(
                    b'PNLOTLFSALMICK9PSW9ZWLE9KJAKPKGJZQJDAFMO'
                    b'VLHXMJCJXFPVHOTTOYDIAUAYELXKZWZUITCQBIQKY',
                ),
                Address(
                    b'DLEIS9XU9V9T9OURAKDUSQWBQEYFGJLRPRVEWKN9'
                    b'SSUGIHBEIPBPEWISSAURGTQKWKWNHXGCBQTWNOGIY',
                ),
            ],
        )

    def test_generator(self):
        """
        Creating a generator.
        """
        ag = AddressGenerator(self.seed_2)
        generator = ag.create_iterator()
        self.assertEqual(
            next(generator),
            Address(
                b'FNKCVJPUANHNWNBAHFBTCONMCUBC9KCZ9EKREBCJ'
                b'AFMABCTEPLGGXDJXVGPXDCFOUCRBWFJFLEAVOEUPY',
            ),
        )
        self.assertEqual(
            next(generator),
            Address(
                b'MSYILYYZLSJ99TDMGQHDOBWGHTBARCBGJZE9PIMQ'
                b'LTEXJXKTDREGVTPA9NDGGLQHTMGISGRAKSLYPGWMB',
            ),
        )
        # ... ad infinitum ...

    def test_generator_with_offset(self):
        """
        Creating a generator that starts at an offset greater than 0.
        """
        ag = AddressGenerator(self.seed_1)
        generator = ag.create_iterator(start=1, step=2)
        self.assertEqual(
            next(generator),
            Address(
                b'PNLOTLFSALMICK9PSW9ZWLE9KJAKPKGJZQJDAFMO'
                b'VLHXMJCJXFPVHOTTOYDIAUAYELXKZWZUITCQBIQKY',
            ),
        )
        self.assertEqual(
            next(generator),
            Address(
                b'IWWMMHBFWCWOZQLBNXDJ9OOTIGXXU9WNUHFGUZWR'
                b'9FWGIUUUQUECHPKXJLIEKZBOVSEA9BCT9DLOCNCEC',
            ),
        )

    def test_security_level_lowered(self):
        """
        Generating addresses with a lower security level.
        """
        ag = AddressGenerator(self.seed_1, security_level=1)
        self.assertListEqual(
            ag.get_addresses(start=0, count=3),
            [
                Address(
                    b'KNDWDEEWWFVZLISLYRABGVWZCHZNZLNSEJXFKVGA'
                    b'UFLL9UMZYEZMEJB9BDLAASWTHEKFREUDIUPY9ICKW',
                ),
                Address(
                    b'CHOBTRTQWTMH9GWFWGWUODRSGPOJOIVJUNIQIBZL'
                    b'HSWNYPHOD9APWJBMJMGLHFZENWFKDYWHX9JDFXTAB',
                ),
                Address(
                    b'YHTOYQUCLDHAIDILFNPITVPYSTOCFAZIUNDYTRDZ'
                    b'CVMVGZPONPINNVPJTOAOKHHZWLOKIZPVASTOGAKPA',
                ),
            ],
        )

    def test_security_level_elevated(self):
        """
        Generating addresses with a higher security level.
        """
        ag = AddressGenerator(self.seed_1, security_level=3)
        self.assertListEqual(
            ag.get_addresses(start=0, count=3),
            [
                Address(
                    b'BGHTGOUKKNTYFHYUAAPSRUEVN9QQXFOGVCH9Y9BO'
                    b'NWXUBDLSKAWEOFZIVMHXBAYVPGDZEYCKNTUJCLPAX',
                ),
                Address(
                    b'EGMRJEUIYFUGWAIXXZCHCZUVUUYITICVHDSHCQXG'
                    b'FHJIVDCLTI9ZVRIKRLZQWW9CPOIXVDCBAHVGLUHI9',
                ),
                Address(
                    b'ENPSARVJZGMMPWZTAIRHADEOZCEVIFNJWSZQHNEI'
                    b'RVEVI9GYMFNEOGNUYCPGPSEFCSDHUHOQKDPVGDKYC',
                ),
            ],
        )

    def test_generator_checksum(self):
        """
        Creating a generator with checksums on the addresses.
        """
        ag = AddressGenerator(
            self.seed_2,
            security_level=AddressGenerator.DEFAULT_SECURITY_LEVEL,
            checksum=True
        )
        generator = ag.create_iterator()
        # Same vectors as test_generator, plus a 9-tryte checksum suffix.
        self.assertEqual(
            next(generator),
            Address(
                b'FNKCVJPUANHNWNBAHFBTCONMCUBC9KCZ9EKREBCJ'
                b'AFMABCTEPLGGXDJXVGPXDCFOUCRBWFJFLEAVOEUPY'
                b'ADHVCBXFD',
            ),
        )
        self.assertEqual(
            next(generator),
            Address(
                b'MSYILYYZLSJ99TDMGQHDOBWGHTBARCBGJZE9PIMQ'
                b'LTEXJXKTDREGVTPA9NDGGLQHTMGISGRAKSLYPGWMB'
                b'WIKQRCIOD',
            ),
        )
| |
#!/usr/bin/env python
import math
import os
import shutil
import six
import stat
import collections
"""
.. module:: pycopy
:platform: Unix, Windows
:synopsis: Recursively copy a directory.
.. moduleauthor:: Brandon Huber
"""
__version__ = '0.1'
# Prefer the third-party `scandir` package when available.
try:
    import scandir
    # NOTE(review): this binds the *module*, while the fallback below binds an
    # attribute — presumably `scandir.scandir` was intended; verify call sites.
    _SCANDIR_FN = scandir
except ImportError:
    # NOTE(review): `fake_scandir` is never imported in this module, so this
    # fallback raises NameError when the `scandir` package is missing — TODO
    # confirm the intended fallback (e.g. `import fake_scandir` or os.scandir).
    _SCANDIR_FN = fake_scandir.scandir
#try:
#    from math import isclose as _ISCLOSE_FN
#except ImportError:
#    raise NotImplementedError()
def default_xform_name(src_startdir, src_subdir, src_basename):
    """Default name-transform hook: keep the source basename unchanged.

    Replace this callback to rename entries during a copy — for example
    ``return src_basename.replace(' ', '_')`` — or return ``None`` to skip
    an entry entirely.

    Because the full source path can be reconstructed as
    ``os.path.join(src_startdir, src_subdir, src_basename)``, a custom
    transform may also inspect the entry (e.g. check whether it is a
    directory) before deciding what to return.
    """
    return src_basename
def should_copy(
    src_stat_result, dst_stat_result, use_size=True, use_mtime=True,
    mtime_tolerance=2.0
):
    """Tells whether a file should get overwritten.

    If the destination file *appears* to be the same as the source file,
    this function will indicate so by returning False. Otherwise, it will
    return True.

    :param stat_result src_stat_result:
        The object returned by ``os.stat`` for the source file.
    :param stat_result dst_stat_result:
        The object returned by ``os.stat`` for the destination file.
    :param bool use_size:
        Whether or not to compare the sizes of the two files. (Default: True)
    :param bool use_mtime:
        Whether or not to compare the mtimes of the two files. (Default: True)
    :param float mtime_tolerance:
        By how many seconds the two files' mtimes are allowed to differ and
        still be considered equivalent. Note that FAT filesystems have a
        2-second resolution. (Default: 2.0)
    :returns:
        True if the two files appear to be dissimilar. False otherwise.
    :rtype: bool
    """
    if use_size and src_stat_result.st_size != dst_stat_result.st_size:
        return True
    # BUG FIX: the original called the nonexistent math.abs(), referenced
    # undefined names (stat_result / x), and invoked st_mtime as a method.
    # st_mtime is a float attribute; use the builtin abs() on the delta.
    if (
        use_mtime
        and abs(src_stat_result.st_mtime - dst_stat_result.st_mtime) >
            mtime_tolerance
    ):
        return True
    return False
class DirEntry(object):
    """One entry (file or directory) of a pending copy operation.

    Records where the entry lives in the source tree, where it should end
    up in the destination tree, and the source entry's stat result.
    """

    def __init__(
        self,
        src_startdir, src_subdir, src_basename,
        dst_startdir, dst_subdir, dst_basename,
        _dbg_os_stat=os.stat
    ):
        # Source location, split into root / relative dir / name.
        self.src_startdir = src_startdir
        self.src_subdir = src_subdir
        self.src_basename = src_basename
        # Destination location, split the same way.
        self.dst_startdir = dst_startdir
        self.dst_subdir = dst_subdir
        self.dst_basename = dst_basename
        # _dbg_os_stat is an injection point for tests.
        source_path = os.path.join(
            self.src_startdir, self.src_subdir, self.src_basename
        )
        self.stat = _dbg_os_stat(source_path)

    def is_dir(self):
        """Return True when the source entry is a directory."""
        return stat.S_ISDIR(self.stat.st_mode)

    def is_file(self):
        """Return True when the source entry is a regular file."""
        return stat.S_ISREG(self.stat.st_mode)

    def size(self):
        """Return the source entry's size in bytes."""
        return self.stat.st_size
def pycopy(
    src,
    dst,
    copy_fn=shutil.copy,
    xform_fn=default_xform_name
):
    """Recursively copy a ``src`` directory to ``dst``.

    Recursively copies files and subdirectories from ``src`` to ``dst``,
    optionally excluding and/or changing the names of files and subdirectories
    based upon the function ``xform_fn``.

    ``xform_fn``, if given, should be a function that meets the following
    criteria:
    1. It takes three strings as parameters: ``src_base``, ``src_dirname``,
       and ``src_basename``.
    2. If the given source filespec is to be copied, a single string should
       be returned. This string will be the desired basename of the
       destination file or directory.
    3. If the given source filespec should NOT be copied, then ``None``
       must be returned. Note that if ``None`` is returned for a source
       filespec that represents a directory, pycopy will not recurse into
       that directory.

    :param str src: Root directory to copy from.
    :param str dst: Root directory to copy into (created on demand).
    :param callable copy_fn: Called as ``copy_fn(src_path, dst_path)`` for
        each regular file that needs copying. (Default: ``shutil.copy``)
    :param callable xform_fn: Naming/exclusion hook, see above.
    """
    # NOTE(review): cmp_dirs accepts copy_fn but does not use it; all the
    # actual copying happens in the loop below.
    for dir_entry in cmp_dirs(src, dst, copy_fn=copy_fn, xform_fn=xform_fn):
        if dir_entry.is_dir():
            # Create the destination directory if it does not already exist.
            if not os.path.exists(
                os.path.join(
                    dir_entry.dst_startdir,
                    dir_entry.dst_subdir,
                    dir_entry.dst_basename,
                )
            ):
                os.makedirs(
                    os.path.join(
                        dir_entry.dst_startdir,
                        dir_entry.dst_subdir,
                        dir_entry.dst_basename
                    )
                )
        else:
            dst_pathname = os.path.join(
                dir_entry.dst_startdir,
                dir_entry.dst_subdir,
                dir_entry.dst_basename
            )
            # Skip files that already appear up to date at the destination
            # (same size / mtime per should_copy).
            if (
                os.path.exists(dst_pathname)
                and not should_copy(dir_entry.stat, os.stat(dst_pathname))
            ):
                continue
            copy_fn(
                os.path.join(
                    dir_entry.src_startdir, dir_entry.src_subdir,
                    dir_entry.src_basename
                ),
                dst_pathname
            )
def cmp_dirs(
    src,
    dst,
    copy_fn=shutil.copy,
    xform_fn=default_xform_name
):
    """Recursively walk ``src``, yielding a ``DirEntry`` for each item to copy.

    Every yielded entry's ``dst_basename`` is the name produced by
    ``xform_fn`` for that entry.  Entries for which ``xform_fn`` returns
    ``None`` are skipped; a skipped directory is not descended into.

    ``xform_fn`` must take three strings (``src_startdir``, ``src_subdir``,
    ``src_basename``) and return the desired destination basename, or
    ``None`` to exclude the entry.

    :param str src: Root directory to walk.
    :param str dst: Root of the destination tree.
    :param callable copy_fn: Unused here; accepted for interface
        compatibility with ``pycopy``.
    :param callable xform_fn: Naming/exclusion hook.
        (Default: ``default_xform_name``)
    """
    todo = collections.deque()
    todo.append(DirEntry(src, '', '', dst, '', ''))

    while len(todo) > 0:
        entry = todo.pop()
        xformed_name = xform_fn(
            entry.src_startdir, entry.src_subdir, entry.src_basename
        )
        if xformed_name is None:
            # Excluded; if this is a directory, we never enqueue its children.
            continue

        # BUG FIX: the transformed name belongs to *this* entry.  Previously
        # it was discarded here and instead stored as the dst_basename of
        # every child, so each child would have been copied under its
        # parent's transformed name and the entry's own rename was lost.
        entry.dst_basename = xformed_name

        if entry.is_dir():
            # Enqueue the directory's children; their own names are
            # transformed when they are popped.
            for child_name in os.listdir(
                os.path.join(
                    entry.src_startdir, entry.src_subdir, entry.src_basename
                )
            ):
                todo.append(
                    DirEntry(
                        entry.src_startdir,
                        os.path.join(entry.src_subdir, entry.src_basename),
                        child_name,
                        entry.dst_startdir,
                        os.path.join(entry.dst_subdir, xformed_name),
                        child_name
                    )
                )
        yield entry
#def pycopy(
# src,
# dst,
# copy_fn=shutil.copy,
# xform_fn=lambda src_base, src_dirname, src_basename: src_basename,
#):
# """Recursively copy a ``src`` directory to ``dst``.
#
# Recursively copies files and subdirectories from ``src`` to ``dst``,
# optionally excluding and/or changing the names of files and subdirectories
# based upon the function ``xform_fn``.
#
# ``xform_fn``, if given, should be a function that meets the following
# criteria:
# 1. It takes three strings as parameters: ``src_base``, ``src_dirname``,
# and ``src_basename``.
# 2. If the given source filespec is to be copied, a single string should
# be returned. This string will be the desired pathname of the
# destination file or directory.
# 3. If the given source filespec should NOT be copied, then ``None``
# must be returned. Note that if ``None`` is returned for a source
# filespec that represents a directory, pycopy will not recurse into
# that directory.
#
# """
# todo = collections.deque( [ DirEntry(src, '', x) for x in os.listdir(src) ] )
#
# while len(todo) > 0:
# x = todo.pop()
# #if xform_fn(src, x['dirname'], x['basename']) == None:
# # continue
# if x.is_dir():
# ###################################################################
# #
# # Skip over this directory if xform_fn wants us to.
# #
# xformed_name = xform_fn(x.src_dir, x.src_subdir, x.basename)
# if xformed_name is None:
# continue
# #
# ###################################################################
# ###################################################################
# #
# # Add all children to the "todo" queue.
# #
# for y in os.listdir(
# os.path.join(x.src_dir, x.src_subdir, x.basename)
# ):
# todo.append(
# DirEntry(
# x.src_dir,
# os.path.join(x.src_subdir, x.basename),
# y
# )
# )
# #
# ###################################################################
# ###################################################################
# #
# # Create the destination directory if it does not already exist.
# #
# if not os.path.exists(xformed_name):
# os.makedirs(xformed_name)
# #
# ###################################################################
# else:
# xformed_name = xform_fn(x.src_dir, x.src_subdir, x.basename)
# if xformed_name is None:
# continue
#
# # # NOTE: This version of determining if the file should be copied
# # # might ignore case if the dst is a case-insensitive file
# # # system.
# #
# #try:
# # if not should_copy(x.stat, os.stat(xformed_name)):
# # continue
# #except OSError, e:
# # if e.errno == 2:
# # pass
#
# if (
# os.path.exists(xformed_name)
# and not should_copy(x.stat, os.stat(xformed_name))
# ):
# continue
#
# copy_fn(os.path.join(x.src_dir, x.src_subdir, x.name), xformed_name)
#
#def pycopy(
# src, dst,
# copy_fn=shutil.copy,
# xform_fn=lambda src_base, src_dirname, src_basename: src_basename,
# _dbg_scandir_fn=_SCANDIR_FN
#):
# todo = collections.deque(_dbg_scandir_fn(src))
#
# while len(todo) > 0:
# x = todo.pop()
# #if xform_fn(src, x['dirname'], x['basename']) == None:
# # continue
# if x.is_dir():
# for y in _dbg_scandir_fn(os.path.join(x.path, x.name)):
# todo.append(y)
# else:
# xformed_name = xform_fn(src, x.path, x.name)
# if xformed_name is None:
# #if os.path.exists(filespec):
# # os.remove(filespec)
# continue
# if should_copy(x.stat, os.stat(xformed_name))
# copy_fn(os.path.join(x.path, x.name), xformed_name)
| |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
""" compile javascript to caffscript (js -> cjs)
starts block ('{') - increase indentation
if more than one block starts on a single line, then 2+ lines in cs are needed; handle this with the cs_delay variable
ends block ('}') - decrease indentation
none of previous:
last char is ([{, (cannot follow ;) - do nothing
last char is ; - remove it
otherwise - add \
"""
tab_spaces = 4 # tab's will be expand to <tab_spaces> spaces (but only in the whitespace around the code)
jsindent = 4*' ' # whitespace for javascript indentation
def hujs(husrc):
    """Compile hu source text to JavaScript and return the generated source.

    :param str husrc: The complete hu source code.
    :returns: The generated JavaScript source as a single string.
    """
    jslines = []
    context = {
        'jsindent_level': 0,         # current indentation level
        'indent_stack': [],          # stack of indent strings
        'multiline_comment': False,  # are we inside the multiline comment /*..*/ ?
        'multiline_string': False,   # are we inside the multiline string `..` ?
        'new_block_ln': None,        # line position, where to open new block ('{') (i.e. last code line)
        'new_block_pos': None,       # char position, where to open new block ('{') in last code line
        # BUG FIX: this entry was written as "'lno', 0," (a tuple element),
        # which is a syntax error inside a dict literal; ':' was intended.
        'lno': 0,                    # number of current processed line
        'after_continuation': False  # True if previous source line has finished with '\'
    }
    for lno, huln in enumerate(husrc.split('\n')):
        context['lno'] = lno
        parsed_ln = []
        huln = parse_strip(huln, parsed_ln, context)
        # Skip the parse for empty lines; parse_stripped would be a no-op.
        if huln:
            parse_stripped(huln, parsed_ln, context)
        append_jsline(jslines, parsed_ln, context)
    return ''.join(jslines)
def parse_strip(huln, parsed_ln, context):
    """parses eol and indentation, return stripped line

    Appends an ('e', eol) element -- and possibly ('i', level) and
    ('b', closers) elements -- to parsed_ln, then returns huln with its
    indentation removed.
    """
    def raise_IndentationError():
        raise IndentationError('line %s' % context['lno'])
    # eol
    # FIXME(review): huln[-1] raises IndexError for an empty line ('').
    if huln[-1] == '\r':
        parsed_ln.append(('e', '\r\n'))
        huln = huln[:-1]
    else:
        parsed_ln.append(('e', '\n'))
    if not context['multiline_string']: # in `..` are both leading and trailing whitespace important
        huln = huln.rstrip()
    # indentation
    block_closer = ''
    huln_stripped = huln.lstrip()
    indent_string = huln[:len(huln) - len(huln_stripped)]
    multiline = context['multiline_comment'] or context['multiline_string']
    # FIXME(review): this branch also tests after_continuation, which makes
    # the following elif unreachable -- after_continuation is then never
    # reset to False and continuation lines are returned unstripped.
    if multiline or context['after_continuation']:
        parsed_ln.insert(-1, ('i', 0))
        return huln
    elif context['after_continuation']:
        parsed_ln.insert(-1, ('i', len(detab(indent_string))))
        context['after_continuation'] = False
        return huln_stripped
    elif huln: # not an empty line
        try:
            previous_indent = context['indent_stack'][-1]
        except IndexError:
            # First non-empty line: top level must not be indented.
            if indent_string:
                raise_IndentationError()
            else:
                context['indent_stack'].append('')
        else:
            if indent_string != previous_indent:
                try:
                    new_jsindent_level = context['indent_stack'].index(indent_string)
                    while new_jsindent_level < context['jsindent_level']:
                        # decreased indentation - add }
                        context['indent_stack'].pop()
                        context['jsindent_level'] -= 1
                        block_closer += '} '
                except ValueError:
                    if len(indent_string) > len(previous_indent) and indent_string[:len(previous_indent)] == previous_indent:
                        # increased indentation - add { to the previous code line
                        context['indent_stack'].append(indent_string)
                        context['jsindent_level'] += 1
                        # FIXME(review): 'jslines' is not defined in this
                        # function (NameError at runtime); it is local to
                        # hujs() and would need to be passed in, e.g. via
                        # context.
                        prev_row = jslines[context['new_block_ln']]
                        jslines[context['new_block_ln']] = prev_row[:context['new_block_pos']] + ' {' + prev_row[context['new_block_pos']:]
                    else:
                        raise_IndentationError()
        parsed_ln.insert(-1, ('i', len(context['indent_stack']) - 1))
        if block_closer:
            # FIXME(review): missing closing parenthesis below -- this line
            # is a syntax error as written.
            parsed_ln.insert(-1, ('b', block_closer)
        return huln_stripped
    return '' # empty line
def parse_stripped(huln, parsed_ln, context):
    """parses source code with removed indentation

    Splits the line into ('s', ...) source and ('c', ...) comment elements,
    inserted into parsed_ln before the trailing ('e', eol) element.
    """
    def flush_buffer(item_type, buffer, parsed_ln):
        """for types (c)omment or (s)ource only"""
        def add_whitespace(full, stripped, parsed_ln, whitespace_right=True):
            # Emit the whitespace that stripping removed as a ('w', ...)
            # element; returns how many characters were stripped.
            spaces = len(full) - len(stripped)
            if spaces:
                if whitespace_right:
                    whitespace = full[-spaces:]
                else:
                    whitespace = full[:spaces]
                parsed_ln.insert(-1, ('w', detab(whitespace)))
            return spaces
        if buffer[0]:
            if item_type == 'c':
                parsed_ln.insert(-1, ('c', buffer))
            else: # code, item_type == 's'
                if buffer[2]['multiline_string_left']:
                    # Line starts inside `..`: leading whitespace is literal.
                    buffer_lstrip = buffer[0]
                    earlier = 0
                else:
                    buffer_lstrip = buffer[0].lstrip()
                    earlier = add_whitespace(buffer[0], buffer_lstrip, parsed_ln)
                if buffer_lstrip:
                    if context['multiline_string']:
                        buffer_strip = buffer_lstrip
                        buffer[2]['multiline_string_right'] = True
                    else:
                        buffer_strip = buffer_lstrip.rstrip()
                        context['after_continuation'] = (' ' + buffer_strip)[-1] in '\,([{'
                    # FIXME(review): list.insert() takes exactly two
                    # arguments; buffer[2] here is passed as a third one
                    # (TypeError).  Presumably the intent was a 4-tuple:
                    # ('s', buffer_strip, positions, buffer[2]).
                    parsed_ln.insert(-1, ('s', buffer_strip, map(lambda x: x - earlier, buffer[1])), buffer[2])
                    # buffer[1] = semicolon positions (outside literals); decrease them for lstripped whitespace
                    add_whitespace(buffer_lstrip, buffer_strip, parsed_ln, whitespace_right=True) # does nothing inside 'multiline_string'
        buffer[0] = '' # parsed source
        buffer[1] = [] # positions of ';' (outside literals) in parsed source code
        # FIXME(review): when called with the 2-element list at the end of
        # this function's "//" branch, buffer[2] below raises IndexError.
        buffer[2] = init_extra()
    def init_extra():
        return {'multiline_string_left': context['multiline_string']}
    buffer = ['', [], init_extra()]
    literal_escape = False
    multiline_off_after = 0
    current_delimiter = '`' if context['multiline_string'] else None
    for pos, char in enumerate(huln):
        if current_delimiter: # inside code, inside the literal
            buffer[0] += char
            if literal_escape:
                literal_escape = False
            elif char == current_delimiter: # end of this literal
                current_delimiter = None
                context['multiline_string'] = False
            elif char == '\\':
                literal_escape = True # next character is escaped
        elif context['multiline_comment']: # inside the multiline comment
            buffer[0] += char
            # FIXME(review): 'ln' is undefined in this function (here and in
            # the three branches below) -- presumably 'huln' was meant.
            if char == "/" and pos > multiline_off_after and ln[pos-1:pos+1] == "*/":
                flush_buffer('c', buffer, parsed_ln)
                context['multiline_comment'] = False
        elif char == "/" and ln[pos:pos+2] == "/*": # multiline comment starts here
            flush_buffer('s', buffer, parsed_ln)
            buffer[0] = char
            context['multiline_comment'] = True
            multiline_off_after = pos+2 # /*/ means start but not end of same comment
        elif char == "/" and ln[pos:pos+2] == "//": # rest of line is (single-line) comment
            flush_buffer('s', buffer, parsed_ln) # here 'buffer' will be cleared: ['', [], {...}]
            flush_buffer('c', [ln[pos:], []], parsed_ln) # here 'buffer' stay without change: ['', [], {...}]
            break # stop parsing this line
        else: # inside code, outside of the literal
            if char in '"\'/`': # here starts the literal
                current_delimiter = char
                if char == '`':
                    context['multiline_string'] = True
            elif char == ';':
                buffer[1].append(len(buffer[0]))
            buffer[0] += char
    flush_buffer('c' if context['multiline_comment'] else 's', buffer, parsed_ln)
def append_jsline(jslines, parsed_ln, context):
    """Render one parsed line's elements into a JS string and append it.

    Source elements ('s') are converted via cmds_conversion and also update
    context['new_block_ln'] / ['new_block_pos'] so a later indentation
    increase can splice a '{' into this line.  Indentation elements ('i')
    carry a level that is multiplied by jsindent; every other element kind
    (comments, whitespace, block closers, eol) carries literal text.
    """
    parts = []
    for element in parsed_ln:
        kind = element[0]
        if kind == 's':  # commands
            parts.append(cmds_conversion(element))
            context['new_block_ln'] = context['lno']
            # Position just past the rendered code on this line so far.
            context['new_block_pos'] = sum(len(part) for part in parts) + 1
        elif kind == 'i':  # indentation: element[1] is the level
            parts.append(element[1] * jsindent)
        else:  # comments, whitespace, block closer, eol
            parts.append(element[1])
    jslines.append(''.join(parts))
def cmds_conversion(src_element):
    """conversion of commands (separated by ";")

    src_element is expected to be ('s', text, semicolon_positions, extra);
    each ';'-separated command is converted individually and the results
    are re-joined with single spaces.
    """
    cmds = []
    after_prev_semicolon_pos = None
    # FIXME(review): 'extra' is assembled here but never passed on --
    # cmd_conversion receives src_element[3] directly in both calls below,
    # so the "last cmd only" handling of multiline_string_right is never
    # actually restricted to the last command.
    extra = {'multiline_string_left': src_element[3].get('multiline_string_left', False)}
    for semicolon_pos in src_element[2]:
        cmds.append(cmd_conversion(src_element[1], after_prev_semicolon_pos, semicolon_pos + 1, src_element[3]))
        after_prev_semicolon_pos = semicolon_pos + 1
    extra['multiline_string_right'] = src_element[3].get('multiline_string_right', False) # apply to the last cmd only
    cmds.append(cmd_conversion(src_element[1], after_prev_semicolon_pos, None, src_element[3]))
    return ' '.join(cmds)
def cmd_conversion(cmds, start, end, extra):
    """conversion of command from cmds string, started at start position, finished at end position

    if last char isn't ';' then this is the (opened) last command on the row and this will be managed

    :param str cmds: the whole source text of the line
    :param start: slice start (None for the first command)
    :param end: slice end (None for the last, still-open command)
    :param dict extra: multiline-string flags produced by the parser
    :returns: the converted command as a string
    """
    # BUG FIX: these helpers were originally defined *after* their first use
    # inside this function, which raised NameError at runtime; nested defs
    # must execute before the code that calls them.
    def add_semicolon(cmd):
        # TODO: this should test if source indentation will not increase
        return cmd + ';'

    def condition_to_brackets(cmd):
        # Wrap the condition part of a keyword statement in parentheses,
        # e.g. 'if x' -> 'if (x)'.
        if cmd[:6] == 'while ' and cmd[-1] == ';':
            # do..while tail: remember the semicolon and re-add it below.
            cmd = cmd[:-1].rstrip()
            removed_semicolon = True
        else:
            removed_semicolon = False
        if cmd[-1] != ')':
            try:
                after_space_pos = cmd.index(' ') + 1
            except ValueError:
                pass
            else:
                cmd_after = cmd[after_space_pos:]
                if cmd_after.strip(): # not for: do {}, try {}
                    cmd_before = cmd[:after_space_pos]
                    if cmd_before == 'else ':
                        # 'else if cond' -> wrap only the part after 'if'.
                        try:
                            candidate_after_space_pos = cmd.index('if ') + 1
                        except ValueError:
                            pass
                        else:
                            if cmd[:candidate_after_space_pos].replace(' ', '')[:6] == 'elseif':
                                after_space_pos = candidate_after_space_pos
                    cmd = cmd_before + '(' + cmd[after_space_pos:] + ')'
            if removed_semicolon:
                cmd += ';'
        elif cmd[:5] == 'while' and cmd[-1] == ')' and cmd.replace(' ', '(')[:6] == 'while(':
            cmd = add_semicolon(cmd)
        return cmd

    cmd = cmds[start:end]
    if start is not None or not extra['multiline_string_left']: # multiline_string_left applies to the first cmd only
        cmd = cmd.lstrip()
    # BUG FIX: use .get() -- 'multiline_string_right' is only set when the
    # line ends inside a multiline string; plain indexing raised KeyError.
    if end is not None or not extra.get('multiline_string_right', False): # multiline_string_right applies to the last cmd only
        cmd = cmd.rstrip()
    if cmd and end is None:
        if cmd[-1] == '\\':
            # '\' is last char - remove '\'
            cmd = cmd[:-1]
        # BUG FIX: the comparison here was written with '!==', which is not
        # a Python operator (syntax error).
        elif cmd[-1] == ':' and cmd[:5] != 'case ':
            cmd = cmd[:-1].rstrip()
            cmd = condition_to_brackets(cmd)
        elif (cmd[:3] == 'if ' or cmd[:5] == 'else ' or cmd[:4] == 'for '
                or cmd[:6] == 'while ' or cmd[:7] == 'switch '
                or cmd[:9] == 'function ' or cmd[:6] == 'catch '):
            cmd = condition_to_brackets(cmd)
        elif cmd[-1] not in ',;([{:':
            cmd = add_semicolon(cmd)
    return cmd
def detab(txt):
    """Expand every tab in ``txt`` into ``tab_spaces`` plain spaces.

    Note: this is a flat replacement, not tab-stop alignment.
    """
    return txt.replace('\t', ' ' * tab_spaces)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Primitives for dealing with datastore indexes.
Example index.yaml file:
------------------------
indexes:
- kind: Cat
ancestor: no
properties:
- name: name
- name: age
direction: desc
- kind: Cat
properties:
- name: name
direction: ascending
- name: whiskers
direction: descending
- kind: Store
ancestor: yes
properties:
- name: business
direction: asc
- name: owner
direction: asc
"""
import itertools
from google.appengine.api import datastore_types
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import entity_pb
class Property(validation.Validated):
  """Representation for an individual property of an index.

  Attributes:
    name: Name of the entity attribute to sort by.
    direction: Direction of sort, 'asc' or 'desc' (the long forms
      'ascending'/'descending' appear to be accepted as aliases via
      validation.Options; defaults to 'asc').
  """

  ATTRIBUTES = {
      'name': validation.Type(str, convert=False),
      'direction': validation.Options(('asc', ('ascending',)),
                                      ('desc', ('descending',)),
                                      default='asc'),
  }
class Index(validation.Validated):
  """Individual index definition.

  Order of the properties determines a given index's sort priority.

  Attributes:
    kind: Datastore kind that the index belongs to.
    ancestor: Whether to include ancestors in the index (defaults to False).
    properties: Properties to sort on.
  """

  ATTRIBUTES = {
      'kind': validation.Type(str, convert=False),
      'ancestor': validation.Type(bool, convert=False, default=False),
      'properties': validation.Optional(validation.Repeated(Property)),
  }
class IndexDefinitions(validation.Validated):
  """Top level for an index definition file (index.yaml).

  Attributes:
    indexes: List of Index definitions; may be absent/None.
  """

  ATTRIBUTES = {
      'indexes': validation.Optional(validation.Repeated(Index)),
  }
def ParseIndexDefinitions(document, open_fn=None):
  """Parse an individual index definitions document from string or stream.

  Args:
    document: Yaml document as a string or file-like stream.
    open_fn: Function for opening files. Unused.

  Raises:
    MultipleConfigurationFile when the configuration file contains more than
    one document.

  Returns:
    Single parsed yaml object if one is defined, else None (an empty
    configuration file is swallowed here, not raised).
  """
  try:
    return yaml_object.BuildSingleObject(IndexDefinitions, document)
  except yaml_errors.EmptyConfigurationFile:
    # An empty index.yaml is not an error for callers; report "no indexes".
    return None
def ParseMultipleIndexDefinitions(document):
  """Parse multiple index definitions documents from a string or stream.

  Args:
    document: Yaml document as a string or file-like stream.

  Returns:
    A list of datastore_index.IndexDefinitions objects, one for each document.
  """
  return yaml_object.BuildObjects(IndexDefinitions, document)
def IndexDefinitionsToKeys(indexes):
  """Convert IndexDefinitions to a set of keys.

  Args:
    indexes: A datastore_index.IndexDefinitions instance, or None.

  Returns:
    A set of keys constructed from the argument, each key being a
    tuple of the form (kind, ancestor, properties) where properties is
    a tuple of (name, direction) pairs, direction being ASCENDING or
    DESCENDING (the enums).
  """
  if indexes is None or not indexes.indexes:
    return set()
  return set(IndexToKey(index) for index in indexes.indexes)
def IndexToKey(index):
  """Convert Index to key.

  Args:
    index: A datastore_index.Index instance (not None!).

  Returns:
    A tuple of the form (kind, ancestor, properties) where properties
    is a tuple of (name, direction) pairs, direction being ASCENDING
    or DESCENDING (the enums).
  """
  props = []
  if index.properties is not None:
    for prop in index.properties:
      # Map the validated short-form direction string onto the enum.
      direction = ASCENDING if prop.direction == 'asc' else DESCENDING
      props.append((prop.name, direction))
  return index.kind, index.ancestor, tuple(props)
# Short aliases for the sort-direction enums used throughout this module.
ASCENDING = datastore_pb.Query_Order.ASCENDING
DESCENDING = datastore_pb.Query_Order.DESCENDING

# Filter operators grouped by the kind of index constraint they impose.
EQUALITY_OPERATORS = set((datastore_pb.Query_Filter.EQUAL,
                          ))
INEQUALITY_OPERATORS = set((datastore_pb.Query_Filter.LESS_THAN,
                            datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
                            datastore_pb.Query_Filter.GREATER_THAN,
                            datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
                            ))
EXISTS_OPERATORS = set((datastore_pb.Query_Filter.EXISTS,
                        ))

# Maps index.yaml direction strings (both short and long forms) to the
# entity_pb direction enums.
_DIRECTION_MAP = {
    'asc': entity_pb.Index_Property.ASCENDING,
    'ascending': entity_pb.Index_Property.ASCENDING,
    'desc': entity_pb.Index_Property.DESCENDING,
    'descending': entity_pb.Index_Property.DESCENDING,
}
def Normalize(filters, orders, exists):
  """ Normalizes filter and order query components.

  The resulting components have the same effect as the given components if used
  in a query.

  Args:
    filters: the filters set on the query
    orders: the orders set on the query
    exists: the names of properties that require an exists filter if
      not already specified

  Returns:
    (filter, orders) the reduced set of filters and orders
  """

  eq_properties = set()
  inequality_properties = set()

  # A single-property IN filter is equivalent to an EQUAL filter; rewrite
  # it in place and classify every filter's property.
  for f in filters:
    if f.op() == datastore_pb.Query_Filter.IN and f.property_size() == 1:
      f.set_op(datastore_pb.Query_Filter.EQUAL)
    if f.op() in EQUALITY_OPERATORS:
      eq_properties.add(f.property(0).name())
    elif f.op() in INEQUALITY_OPERATORS:
      inequality_properties.add(f.property(0).name())

  eq_properties -= inequality_properties

  # Drop orders on equality-filtered properties, and de-duplicate orders
  # (only the first order per property is kept).
  remove_set = eq_properties.copy()
  new_orders = []
  for o in orders:
    if o.property() not in remove_set:
      remove_set.add(o.property())
      new_orders.append(o)
  orders = new_orders

  remove_set.update(inequality_properties)

  # Drop EXISTS filters on properties that are already constrained or
  # ordered; keep at most one EXISTS filter per remaining property.
  new_filters = []
  for f in filters:
    if f.op() not in EXISTS_OPERATORS:
      new_filters.append(f)
      continue
    name = f.property(0).name()
    if name not in remove_set:
      remove_set.add(name)
      new_filters.append(f)

  # Add EXISTS filters for requested properties not otherwise covered.
  for prop in exists:
    if prop not in remove_set:
      remove_set.add(prop)
      new_filter = datastore_pb.Query_Filter()
      new_filter.set_op(datastore_pb.Query_Filter.EXISTS)
      new_prop = new_filter.add_property()
      new_prop.set_name(prop)
      new_prop.set_multiple(False)
      new_prop.mutable_value()
      new_filters.append(new_filter)
  filters = new_filters

  # An equality filter on __key__ pins the result to at most one entity,
  # so every order is redundant.
  if datastore_types.KEY_SPECIAL_PROPERTY in eq_properties:
    orders = []

  # Orders after an order on __key__ are redundant (keys are unique):
  # truncate the order list right after the first __key__ order.
  new_orders = []
  for o in orders:
    if o.property() == datastore_types.KEY_SPECIAL_PROPERTY:
      new_orders.append(o)
      break
    new_orders.append(o)
  orders = new_orders

  return (filters, orders)
def RemoveNativelySupportedComponents(filters, orders, exists):
  """ Removes query components that are natively supported by the datastore.

  The resulting filters and orders should not be used in an actual query.

  Args:
    filters: the filters set on the query
    orders: the orders set on the query
    exists: the names of properties that require an exists filter if
      not already specified

  Returns:
    (filters, orders) the reduced set of filters and orders
  """
  (filters, orders) = Normalize(filters, orders, exists)

  # If any EXISTS filters survive normalization, nothing more can be
  # stripped here.
  for f in filters:
    if f.op() in EXISTS_OPERATORS:
      return (filters, orders)

  # A trailing ascending order on __key__ is implicit in every index, so
  # it can be dropped; a descending one cannot.
  has_key_desc_order = False
  if orders and orders[-1].property() == datastore_types.KEY_SPECIAL_PROPERTY:
    if orders[-1].direction() == ASCENDING:
      orders = orders[:-1]
    else:
      has_key_desc_order = True

  # When there is no descending __key__ order and no inequality filter on a
  # non-key property, all __key__ filters can be dropped (handled natively).
  if not has_key_desc_order:
    for f in filters:
      if (f.op() in INEQUALITY_OPERATORS and
          f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY):
        break
    else:
      filters = [f for f in filters
                 if f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY]

  return (filters, orders)
def CompositeIndexForQuery(query):
  """Return the composite index needed for a query.

  A query is translated into a tuple, as follows:

  - The first item is the kind string, or None if we're not filtering
    on kind (see below).

  - The second item is a bool giving whether the query specifies an
    ancestor.

  - After that come (property, ASCENDING) pairs for those Filter
    entries whose operator is EQUAL or IN.  Since the order of these
    doesn't matter, they are sorted by property name to normalize them
    in order to avoid duplicates.

  - After that comes at most one (property, ASCENDING) pair for a
    Filter entry whose operator is on of the four inequalities.  There
    can be at most one of these.

  - After that come all the (property, direction) pairs for the Order
    entries, in the order given in the query.  Exceptions:
    (a) if there is a Filter entry with an inequality operator that matches
        the first Order entry, the first order pair is omitted (or,
        equivalently, in this case the inequality pair is omitted).
    (b) if an Order entry corresponds to an equality filter, it is ignored
        (since there will only ever be one value returned).
    (c) if there is an equality filter on __key__ all orders are dropped
        (since there will be at most one result returned).
    (d) if there is an order on __key__ all further orders are dropped (since
        keys are unique).
    (e) orders on __key__ ASCENDING are dropped (since this is supported
        natively by the datastore).

  - Finally, if there are Filter entries whose operator is EXISTS, and
    whose property names are not already listed, they are added, with
    the direction set to ASCENDING.

  This algorithm should consume all Filter and Order entries.

  Additional notes:

  - The low-level implementation allows queries that don't specify a
    kind; but the Python API doesn't support this yet.

  - If there's an inequality filter and one or more sort orders, the
    first sort order *must* match the inequality filter.

  - The following indexes are always built in and should be suppressed:
    - query on kind only;
    - query on kind and one filter *or* one order;
    - query on ancestor only, without kind (not exposed in Python yet);
    - query on kind and equality filters only, no order (with or without
      ancestor).

  - While the protocol buffer allows a Filter to contain multiple
    properties, we don't use this.  It is only needed for the IN operator
    but this is (currently) handled on the client side, so in practice
    each Filter is expected to have exactly one property.

  Args:
    query: A datastore_pb.Query instance.

  Returns:
    A tuple of the form (required, kind, ancestor, properties).
      required: boolean, whether the index is required;
      kind: the kind or None;
      ancestor: True if this is an ancestor query;
      properties: A tuple consisting of:
      - the prefix, represented by a set of property names
      - the postfix, represented by a tuple consisting of any number of:
        - Sets of property names: Indicates these properties can appear in any
          order with any direction.
        - Tuples of (property name, direction) tuples. Indicating the properties
          must appear in the exact order with the given direction. direction can
          be None if direction does not matter.
  """
  required = True

  kind = query.kind()
  ancestor = query.has_ancestor()
  filters = query.filter_list()
  orders = query.order_list()

  # IN filters are expected to have been expanded by the client layer;
  # every filter must carry exactly one property.
  for filter in filters:
    assert filter.op() != datastore_pb.Query_Filter.IN, 'Filter.op()==IN'
    nprops = len(filter.property_list())
    assert nprops == 1, 'Filter has %s properties, expected 1' % nprops

  if not kind:
    required = False

  exists = list(query.property_name_list())
  exists.extend(query.group_by_property_name_list())

  filters, orders = RemoveNativelySupportedComponents(filters, orders, exists)

  # Partition the remaining filters by operator class.
  eq_filters = [f for f in filters if f.op() in EQUALITY_OPERATORS]
  ineq_filters = [f for f in filters if f.op() in INEQUALITY_OPERATORS]
  exists_filters = [f for f in filters if f.op() in EXISTS_OPERATORS]
  assert (len(eq_filters) + len(ineq_filters) +
          len(exists_filters)) == len(filters), 'Not all filters used'

  if (kind and not ineq_filters and not exists_filters and
      not orders):
    # Kind plus equality filters only: built in, unless a special
    # property is involved.
    names = set(f.property(0).name() for f in eq_filters)
    if not names.intersection(datastore_types._SPECIAL_PROPERTIES):
      required = False

  # All inequality filters must target the same property (ignoring the
  # unapplied-log-timestamp special property).
  ineq_property = None
  if ineq_filters:
    for filter in ineq_filters:
      if (filter.property(0).name() ==
          datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
        continue
      if not ineq_property:
        ineq_property = filter.property(0).name()
      else:
        assert filter.property(0).name() == ineq_property

  group_by_props = set(query.group_by_property_name_list())

  # Prefix: equality-filtered properties; any order, any direction.
  prefix = frozenset(f.property(0).name() for f in eq_filters)

  postfix_ordered = [(order.property(), order.direction()) for order in orders]

  postfix_group_by = frozenset(f.property(0).name() for f in exists_filters
                               if f.property(0).name() in group_by_props)

  postfix_unordered = frozenset(f.property(0).name() for f in exists_filters
                                if f.property(0).name() not in group_by_props)

  if ineq_property:
    if orders:
      # The first sort order must match the inequality filter.
      assert ineq_property == orders[0].property()
    else:
      postfix_ordered.append((ineq_property, None))

  property_count = (len(prefix) + len(postfix_ordered) + len(postfix_group_by)
                    + len(postfix_unordered))
  if kind and not ancestor and property_count <= 1:
    # Kind with at most one filter/order is built in...
    required = False

    if postfix_ordered:
      # ...except a descending order on __key__, which needs an index.
      prop, dir = postfix_ordered[0]
      if prop == datastore_types.KEY_SPECIAL_PROPERTY and dir is DESCENDING:
        required = True

  props = prefix, (tuple(postfix_ordered), postfix_group_by, postfix_unordered)
  return required, kind, ancestor, props
def GetRecommendedIndexProperties(properties):
  """Converts the properties returned by datastore_index.CompositeIndexForQuery
  into a recommended list of index properties and directions.

  All unordered components are sorted and assigned an ASCENDING direction. All
  ordered components without a direction are assigned an ASCENDING direction.

  Args:
    properties: See datastore_index.CompositeIndexForQuery

  Returns:
    A tuple of (name, direction) tuples where:
      name: a property name
      direction: datastore_pb.Query_Order.ASCENDING or ...DESCENDING
  """
  prefix, postfix = properties
  recommended = []
  for group in itertools.chain((prefix,), postfix):
    if isinstance(group, (frozenset, set)):
      # Unordered group: normalize by sorting, default every direction.
      recommended.extend((name, ASCENDING) for name in sorted(group))
    else:
      # Ordered group: keep order; fill in missing directions only.
      recommended.extend(
          (name, direction if direction is not None else ASCENDING)
          for name, direction in group)
  return tuple(recommended)
def _MatchPostfix(postfix_props, index_props):
"""Matches a postfix constraint with an existing index.
postfix_props constraints are specified through a list of:
- sets of string: any order any direction;
- list of tuples(string, direction): the given order, and, if specified, the
given direction.
For example:
[set('A', 'B'), [('C', None), ('D', ASC)]]
matches:
[('F', ASC), ('B', ASC), ('A', DESC), ('C', DESC), ('D', ASC)]
with a return value of [('F', ASC)], but does not match:
[('F', ASC), ('A', DESC), ('C', DESC), ('D', ASC)]
[('B', ASC), ('F', ASC), ('A', DESC), ('C', DESC), ('D', ASC)]
[('F', ASC), ('B', ASC), ('A', DESC), ('C', DESC), ('D', DESC)]
Args:
postfix_props: A tuple of sets and lists, as output by
CompositeIndexForQuery. They should define the requirements for the
postfix of the index.
index_props: A list of tuples (property_name, property_direction), that
define the index to try and match.
Returns:
The list of tuples that define the prefix properties in the given index.
None if the constraints could not be satisfied.
"""
index_props_rev = reversed(index_props)
for property_group in reversed(postfix_props):
index_group_iter = itertools.islice(index_props_rev, len(property_group))
if isinstance(property_group, (frozenset, set)):
index_group = set(prop for prop, _ in index_group_iter)
if index_group != property_group:
return None
else:
index_group = list(index_group_iter)
if len(index_group) != len(property_group):
return None
for (index_prop, index_dir), (prop, direction) in itertools.izip(
index_group, reversed(property_group)):
if index_prop != prop or (direction and index_dir != direction):
return None
remaining = list(index_props_rev)
remaining.reverse()
return remaining
def MinimalCompositeIndexForQuery(query, index_defs):
  """Computes the minimal composite index for this query.

  Unlike datastore_index.CompositeIndexForQuery, this function takes into
  account indexes that already exist in the system.

  Args:
    query: the datastore_pb.Query to compute suggestions for
    index_defs: a list of datastore_index.Index objects that already exist.

  Returns:
    None if no index is needed, otherwise the minimal index in the form
    (is_most_efficient, kind, ancestor, properties). Where is_most_efficient
    is a boolean denoting if the suggested index is the most efficient (i.e.
    the one returned by datastore_index.CompositeIndexForQuery). kind and
    ancestor are the same variables returned by
    datastore_index.CompositeIndexForQuery. properties is a tuple consisting
    of the prefix and postfix properties returned by
    datastore_index.CompositeIndexForQuery.
  """
  required, kind, ancestor, (prefix, postfix) = CompositeIndexForQuery(query)
  if not required:
    return None

  # Maps each candidate index postfix to the (prefix props, ancestor)
  # requirements that are still unsatisfied after using that index.
  remaining_dict = {}

  for definition in index_defs:
    if (kind != definition.kind or
        (not ancestor and definition.ancestor)):
      continue

    _, _, index_props = IndexToKey(definition)

    index_prefix = _MatchPostfix(postfix, index_props)
    if index_prefix is None:
      continue

    remaining_index_props = set([prop for prop, _ in index_prefix])
    if remaining_index_props - prefix:
      # The index requires properties the query's prefix does not supply.
      continue

    index_postfix = tuple(index_props[len(index_prefix):])
    remaining = remaining_dict.get(index_postfix)
    if remaining is None:
      remaining = prefix.copy(), ancestor

    # NOTE(review): props_remaining aliases the set stored in ``remaining``,
    # so the subtraction below also mutates any entry already cached in
    # remaining_dict for this postfix — preserved from the original logic.
    props_remaining, ancestor_remaining = remaining
    props_remaining -= remaining_index_props
    if definition.ancestor:
      ancestor_remaining = False

    if not (props_remaining or ancestor_remaining):
      # An existing index fully covers the query: no new index needed.
      return None

    if (props_remaining, ancestor_remaining) == remaining:
      continue

    remaining_dict[index_postfix] = (props_remaining, ancestor_remaining)

  if not remaining_dict:
    return (True, kind, ancestor, (prefix, postfix))

  def calc_cost(minimal_props, minimal_ancestor):
    # An ancestor requirement is weighted as two extra properties.
    result = len(minimal_props)
    if minimal_ancestor:
      result += 2
    return result

  # Pick the cheapest of the candidate reduced indexes.
  minimal_postfix, remaining = remaining_dict.popitem()
  minimal_props, minimal_ancestor = remaining
  minimal_cost = calc_cost(minimal_props, minimal_ancestor)
  # Fix: dict.iteritems() was removed in Python 3; items() works on both
  # Python 2 and 3 for this read-only iteration.
  for index_postfix, (props_remaining, ancestor_remaining) in (
      remaining_dict.items()):
    cost = calc_cost(props_remaining, ancestor_remaining)
    if cost < minimal_cost:
      minimal_cost = cost
      minimal_postfix = index_postfix
      minimal_props = props_remaining
      minimal_ancestor = ancestor_remaining

  props = frozenset(minimal_props), (minimal_postfix, frozenset(), frozenset())
  return False, kind, minimal_ancestor, props
def IndexYamlForQuery(kind, ancestor, props):
  """Return the composite index definition YAML needed for a query.

  Given a query, the arguments for this method can be computed with:
    _, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
    props = datastore_index.GetRecommendedIndexProperties(props)

  Args:
    kind: the kind or None
    ancestor: True if this is an ancestor query, False otherwise
    props: tuples of the form (name, direction) where:
      name - a property name;
      direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;

  Returns:
    A string with the YAML for the composite index needed by the query.
  """
  lines = ['- kind: %s' % kind]
  if ancestor:
    lines.append(' ancestor: yes')
  if props:
    lines.append(' properties:')
    for prop_name, direction in props:
      lines.append(' - name: %s' % prop_name)
      # Ascending is the YAML default, so only descending is spelled out.
      if direction == DESCENDING:
        lines.append(' direction: desc')
  return '\n'.join(lines)
def IndexXmlForQuery(kind, ancestor, props):
  """Return the composite index definition XML needed for a query.

  Given a query, the arguments for this method can be computed with:
    _, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
    props = datastore_index.GetRecommendedIndexProperties(props)

  Args:
    kind: the kind or None
    ancestor: True if this is an ancestor query, False otherwise
    props: tuples of the form (name, direction) where:
      name - a property name;
      direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;

  Returns:
    A string with the XML for the composite index needed by the query.
  """
  lines = ['<datastore-index kind="%s" ancestor="%s">'
           % (kind, 'true' if ancestor else 'false')]
  for prop_name, direction in props:
    lines.append(' <property name="%s" direction="%s" />'
                 % (prop_name, 'asc' if direction == ASCENDING else 'desc'))
  lines.append('</datastore-index>')
  return '\n'.join(lines)
def IndexDefinitionToProto(app_id, index_definition):
  """Transform an individual Index definition to a protocol buffer.

  Args:
    app_id: Application id for new protocol buffer CompositeIndex.
    index_definition: datastore_index.Index object to transform.

  Returns:
    New entity_pb.CompositeIndex with default values set and index
    information filled in.
  """
  composite = entity_pb.CompositeIndex()
  composite.set_app_id(app_id)
  composite.set_id(0)
  # New indexes always start out write-only.
  composite.set_state(entity_pb.CompositeIndex.WRITE_ONLY)

  definition_proto = composite.mutable_definition()
  definition_proto.set_entity_type(index_definition.kind)
  definition_proto.set_ancestor(index_definition.ancestor)

  # properties may be None; treat that the same as an empty list.
  for prop in (index_definition.properties or ()):
    prop_proto = definition_proto.add_property()
    prop_proto.set_name(prop.name)
    prop_proto.set_direction(_DIRECTION_MAP[prop.direction])
  return composite
def IndexDefinitionsToProtos(app_id, index_definitions):
  """Transform multiple index definitions to composite index records.

  Args:
    app_id: Application id for new protocol buffer CompositeIndex.
    index_definitions: A list of datastore_index.Index objects to transform.

  Returns:
    A list of transformed entity_pb.CompositeIndex entities with default
    values set and index information filled in.
  """
  protos = []
  for definition in index_definitions:
    protos.append(IndexDefinitionToProto(app_id, definition))
  return protos
def ProtoToIndexDefinition(proto):
  """Transform an individual index protocol buffer to an index definition.

  Args:
    proto: An instance of entity_pb.CompositeIndex to transform.

  Returns:
    A new instance of datastore_index.Index.
  """
  proto_index = proto.definition()
  props = []
  for prop_proto in proto_index.property_list():
    prop_definition = Property(name=prop_proto.name())
    # Ascending is the implicit default; only descending is recorded.
    if prop_proto.direction() == entity_pb.Index_Property.DESCENDING:
      prop_definition.direction = 'descending'
    props.append(prop_definition)

  index = Index(kind=proto_index.entity_type(), properties=props)
  if proto_index.ancestor():
    index.ancestor = True
  return index
def ProtosToIndexDefinitions(protos):
  """Transform multiple index protocol buffers to index definitions.

  Args:
    protos: A list of entity_pb.CompositeIndex records.

  Returns:
    A list of datastore_index.Index objects.
  """
  return [ProtoToIndexDefinition(index_proto) for index_proto in protos]
| |
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import packet
from engineio import payload
from engineio import socket
class TestSocket(unittest.TestCase):
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.1
mock_server.ping_interval = 0.1
try:
import queue
except ImportError:
import Queue as queue
import threading
mock_server.async = mock.Mock()
mock_server.async.Queue = queue.Queue
mock_server.async.QueueEmpty = queue.Empty
mock_server.async.thread = lambda t: threading.Thread(target=t)
mock_server.async.has_websocket = False
return mock_server
def test_create(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(IOError, s.poll)
def test_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
self.assertEqual(s.poll(), [pkt1, pkt2])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.PING, data='abc'))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertTrue(r[0].encode(), b'3abc')
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(ValueError, s.receive, packet.Packet(packet.OPEN))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -0.1
s = socket.Socket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = mock.MagicMock()
s.send('packet')
s.close.assert_called_once_with(wait=False, abort=True)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
self.assertRaises(IOError, s.handle_get_request, environ,
start_response)
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
s.handle_post_request(environ)
self.assertEqual(s.receive.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
self.assertRaises(ValueError, s.handle_post_request, environ)
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
s._upgrade_websocket = mock.MagicMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
start_response = mock.MagicMock()
s.handle_get_request(environ, start_response)
s._upgrade_websocket.assert_called_once_with(environ, start_response)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server.async.has_websocket = True
mock_server.async.wrap_websocket = mock.MagicMock()
mock_ws = mock.MagicMock()
mock_server.async.wrap_websocket.configure_mock(
return_value=mock_ws)
s = socket.Socket(mock_server, 'sid')
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server.async.wrap_websocket.assert_called_once_with(
s._websocket_handler)
mock_ws.assert_called_once_with(environ, start_response)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server.async.has_websocket = True
mock_server.async.wrap_websocket = mock.MagicMock()
s = socket.Socket(mock_server, 'sid')
s.upgraded = True
environ = "foo"
start_response = "bar"
self.assertRaises(IOError, s._upgrade_websocket,
environ, start_response)
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.UPGRADE))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertTrue(r[0].encode(), b'6')
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
ws = mock.MagicMock()
ws.wait.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
s._websocket_handler(ws)
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
ws = mock.MagicMock()
probe = six.text_type('probe')
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
s._websocket_handler(ws)
ws.send.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(s.queue.get().packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', 'foo'),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_websocket_read_write_fail(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.side_effect = [None, RuntimeError]
s._websocket_handler(ws)
time.sleep(0)
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', foo),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
self.assertRaises(IOError, s.send, packet.Packet(packet.NOOP))
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=True)
s.queue.join.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=False)
self.assertEqual(s.queue.join.call_count, 0)
| |
from django.conf.urls import include, url
from django.shortcuts import redirect
from olympia.addons.urls import ADDON_ID
from olympia.amo.decorators import write
from olympia.amo.utils import partial
from olympia.lib.misc.urlconf_decorator import decorate
from . import views
# These will all start with /theme/<slug>/
theme_detail_patterns = [
    # The bare theme URL permanently redirects to its edit page.
    url('^$', lambda r,
        addon_id: redirect('devhub.themes.edit', addon_id, permanent=True)),
    url('^delete$', views.delete, name='devhub.themes.delete'),
    # Upload url here to satisfy CSRF.
    url('^edit/upload/'
        '(?P<upload_type>persona_header|persona_footer)$',
        views.ajax_upload_image, name='devhub.personas.reupload_persona'),
    url('^edit$', views.edit_theme, name='devhub.themes.edit'),
    url('^rmlocale$', views.remove_locale, name='devhub.themes.remove-locale'),
]
# These will all start with /addon/<addon_id>/
# Patterns use raw strings: '\d' in a normal string literal is an invalid
# escape sequence (DeprecationWarning since Python 3.6); the byte values of
# the patterns are unchanged.
detail_patterns = [
    # Redirect to the edit page from the base.
    url(r'^$', lambda r, addon_id: redirect('devhub.addons.edit', addon_id,
                                            permanent=True)),
    url(r'^edit$', views.edit, name='devhub.addons.edit'),
    url(r'^delete$', views.delete, name='devhub.addons.delete'),
    url(r'^disable$', views.disable, name='devhub.addons.disable'),
    url(r'^enable$', views.enable, name='devhub.addons.enable'),
    url(r'^cancel$', views.cancel, name='devhub.addons.cancel'),
    url(r'^ownership$', views.ownership, name='devhub.addons.owner'),
    url(r'^admin$', views.admin, name='devhub.addons.admin'),
    url(r'^payments$', views.payments, name='devhub.addons.payments'),
    url(r'^payments/disable$', views.disable_payments,
        name='devhub.addons.payments.disable'),
    url(r'^profile$', views.profile, name='devhub.addons.profile'),
    url(r'^profile/remove$', views.remove_profile,
        name='devhub.addons.profile.remove'),
    url(r'^edit_(?P<section>[^/]+)(?:/(?P<editable>[^/]+))?$',
        views.addons_section, name='devhub.addons.section'),
    url(r'^upload_preview$', views.upload_image, {'upload_type': 'preview'},
        name='devhub.addons.upload_preview'),
    url(r'^upload_icon$', views.upload_image, {'upload_type': 'icon'},
        name='devhub.addons.upload_icon'),
    url(r'^upload-(?P<channel>listed|unlisted)$', views.upload_for_version,
        name='devhub.upload_for_version'),
    url(r'^upload/(?P<uuid>[^/]+)$', views.upload_detail_for_version,
        name='devhub.upload_detail_for_version'),
    url(r'^versions$', views.version_list, name='devhub.addons.versions'),
    url(r'^versions/delete$', views.version_delete,
        name='devhub.versions.delete'),
    url(r'^versions/reenable$', views.version_reenable,
        name='devhub.versions.reenable'),
    url(r'^versions/stats$', views.version_stats,
        name='devhub.versions.stats'),
    url(r'^versions/(?P<version_id>\d+)$', views.version_edit,
        name='devhub.versions.edit'),
    url(r'^versions/(?P<version>[^/]+)$', views.version_bounce),

    # New version submission
    url(r'^versions/submit/$',
        views.submit_version_auto,
        name='devhub.submit.version'),
    url(r'^versions/submit/distribution$',
        views.submit_version_distribution,
        name='devhub.submit.version.distribution'),
    url(r'^versions/submit/upload-(?P<channel>listed|unlisted)$',
        views.submit_version_upload,
        name='devhub.submit.version.upload'),
    url(r'^versions/submit/(?P<version_id>\d+)/details$',
        views.submit_version_details,
        name='devhub.submit.version.details'),
    url(r'^versions/submit/(?P<version_id>\d+)/finish$',
        views.submit_version_finish,
        name='devhub.submit.version.finish'),

    # New file submission
    url(r'^versions/(?P<version_id>\d+)/submit-file/$',
        views.submit_file,
        name='devhub.submit.file'),
    url(r'^versions/submit/(?P<version_id>\d+)/finish-file$',
        views.submit_file_finish,
        name='devhub.submit.file.finish'),

    url(r'^file/(?P<file_id>[^/]+)/validation$', views.file_validation,
        name='devhub.file_validation'),
    url(r'^file/(?P<file_id>[^/]+)/validation\.json$',
        views.json_file_validation,
        name='devhub.json_file_validation'),
    url(r'^validation-result/(?P<result_id>\d+)$',
        views.bulk_compat_result,
        name='devhub.bulk_compat_result'),
    url(r'^validation-result/(?P<result_id>\d+)\.json$',
        views.json_bulk_compat_result,
        name='devhub.json_bulk_compat_result'),
    url(r'^submit/$',
        lambda r, addon_id: redirect('devhub.submit.finish', addon_id)),
    url(r'^submit/details$',
        views.submit_addon_details, name='devhub.submit.details'),
    url(r'^submit/finish$', views.submit_addon_finish,
        name='devhub.submit.finish'),
    url(r'^request-review$',
        views.request_review, name='devhub.request-review'),
    url(r'^rmlocale$', views.remove_locale,
        name='devhub.addons.remove-locale'),
]
# These will all start with /ajax/addon/<addon_id>/
# Raw strings fix the invalid '\d' escape sequence (DeprecationWarning since
# Python 3.6) without changing the pattern bytes.
ajax_patterns = [
    url(r'^dependencies$', views.ajax_dependencies,
        name='devhub.ajax.dependencies'),
    url(r'^versions/compatibility/status$',
        views.ajax_compat_status, name='devhub.ajax.compat.status'),
    url(r'^versions/compatibility/error$',
        views.ajax_compat_error, name='devhub.ajax.compat.error'),
    url(r'^versions/(?P<version_id>\d+)/compatibility$',
        views.ajax_compat_update, name='devhub.ajax.compat.update'),
    url(r'^image/status$', views.image_status, name='devhub.ajax.image.status'),
]
# Legacy URLs that permanently redirect to their current locations.
# Raw strings fix the invalid '\d' escape sequence (DeprecationWarning since
# Python 3.6) without changing the pattern bytes.
redirect_patterns = [
    url(r'^addon/edit/(\d+)',
        lambda r, id: redirect('devhub.addons.edit', id, permanent=True)),
    url(r'^addon/status/(\d+)',
        lambda r, id: redirect('devhub.addons.versions', id, permanent=True)),
    url(r'^versions/(\d+)',
        lambda r, id: redirect('devhub.addons.versions', id, permanent=True)),
]
# Every devhub view runs inside the @write decorator (master database).
# Patterns use raw strings: '\d', '\.' and '\w' in normal string literals are
# invalid escape sequences (DeprecationWarning since Python 3.6); the pattern
# bytes are unchanged.
urlpatterns = decorate(write, [
    url(r'^$', views.index, name='devhub.index'),
    url(r'', include(redirect_patterns)),

    # Redirect people who have /addons/ instead of /addon/.
    url(r'^addons/\d+/.*',
        lambda r: redirect(r.path.replace('addons', 'addon', 1))),

    # Add-on submission
    url(r'^addon/submit/(?:1)?$',
        lambda r: redirect('devhub.submit.agreement', permanent=True)),
    url(r'^addon/submit/agreement$', views.submit_addon,
        name='devhub.submit.agreement'),
    url(r'^addon/submit/distribution$', views.submit_addon_distribution,
        name='devhub.submit.distribution'),
    url(r'^addon/submit/upload-(?P<channel>listed|unlisted)$',
        views.submit_addon_upload, name='devhub.submit.upload'),

    # Submission API
    url(r'^addon/agreement/$', views.api_key_agreement,
        name='devhub.api_key_agreement'),
    url(r'^addon/api/key/$', views.api_key, name='devhub.api_key'),

    # Standalone validator:
    url(r'^addon/validate/?$', views.validate_addon,
        name='devhub.validate_addon'),

    # Standalone compatibility checker:
    url(r'^addon/check-compatibility$', views.check_addon_compatibility,
        name='devhub.check_addon_compatibility'),
    url(r'^addon/check-compatibility/application_versions\.json$',
        views.compat_application_versions,
        name='devhub.compat_application_versions'),

    # Redirect to /addons/ at the base.
    url(r'^addon$', lambda r: redirect('devhub.addons', permanent=True)),
    url(r'^addons$', views.dashboard, name='devhub.addons'),
    url(r'^themes$', views.dashboard, name='devhub.themes',
        kwargs={'theme': True}),
    url(r'^feed$', views.feed, name='devhub.feed_all'),
    # TODO: not necessary when devhub homepage is moved out of remora
    url(r'^feed/all$', lambda r: redirect('devhub.feed_all', permanent=True)),
    url(r'^feed/%s$' % ADDON_ID, views.feed, name='devhub.feed'),

    url(r'^upload$', views.upload, name='devhub.upload'),
    url(r'^upload/unlisted$',
        partial(views.upload, channel='unlisted'),
        name='devhub.upload_unlisted'),
    url(r'^upload/([^/]+)(?:/([^/]+))?$', views.upload_detail,
        name='devhub.upload_detail'),
    url(r'^standalone-upload$',
        partial(views.upload, is_standalone=True),
        name='devhub.standalone_upload'),
    url(r'^standalone-upload-unlisted$',
        partial(views.upload, is_standalone=True, channel='unlisted'),
        name='devhub.standalone_upload_unlisted'),
    url(r'^standalone-upload/([^/]+)$', views.standalone_upload_detail,
        name='devhub.standalone_upload_detail'),

    # URLs for a single add-on.
    url(r'^addon/%s/' % ADDON_ID, include(detail_patterns)),
    url(r'^ajax/addon/%s/' % ADDON_ID, include(ajax_patterns)),

    # Themes submission.
    url(r'^theme/submit/?$', views.submit_theme, name='devhub.themes.submit'),
    url(r'^theme/%s/submit/done$' % ADDON_ID, views.submit_theme_done,
        name='devhub.themes.submit.done'),
    url(r'^theme/submit/upload/'
        r'(?P<upload_type>persona_header|persona_footer)$',
        views.ajax_upload_image, name='devhub.personas.upload_persona'),
    url(r'^theme/%s/' % ADDON_ID, include(theme_detail_patterns)),

    # Add-on SDK page
    url(r'builder$', lambda r: redirect(views.MDN_BASE)),

    # Developer docs
    url(r'docs/(?P<doc_name>[-_\w]+(?:/[-_\w]+)?)?$',
        views.docs, name='devhub.docs'),
])
| |
"""Exchange and Queue declarations."""
from __future__ import absolute_import, unicode_literals
import numbers
from .abstract import MaybeChannelBound, Object
from .exceptions import ContentDisallowed
from .five import python_2_unicode_compatible, string_t
from .serialization import prepare_accept_content
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
'persistent': PERSISTENT_DELIVERY_MODE}
__all__ = ('Exchange', 'Queue', 'binding', 'maybe_delivery_mode')
INTERNAL_EXCHANGE_PREFIX = ('amq.',)
def _reprstr(s):
    """Return ``repr(s)`` with the quotes (and any ``u`` prefix) removed."""
    rep = repr(s)
    if isinstance(rep, string_t) and rep.startswith("u'"):
        return rep[2:-1]
    return rep[1:-1]
def pretty_bindings(bindings):
    """Format a sequence of bindings as a bracketed, comma-separated list."""
    rendered = ', '.join(str(b) for b in bindings)
    return '[{0}]'.format(rendered)
def maybe_delivery_mode(
        v, modes=DELIVERY_MODES, default=PERSISTENT_DELIVERY_MODE):
    """Resolve a delivery mode given either an integer or an alias string.

    Falsy values resolve to *default*.
    """
    if not v:
        return default
    if isinstance(v, numbers.Integral):
        return v
    return modes[v]
@python_2_unicode_compatible
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
Arguments:
name (str): See :attr:`name`.
type (str): See :attr:`type`.
channel (kombu.Connection, ChannelT): See :attr:`channel`.
durable (bool): See :attr:`durable`.
auto_delete (bool): See :attr:`auto_delete`.
delivery_mode (enum): See :attr:`delivery_mode`.
arguments (Dict): See :attr:`arguments`.
no_declare (bool): See :attr:`no_declare`
Attributes:
name (str): Name of the exchange.
Default is no name (the default exchange).
type (str):
*This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. Reading this article is recommended if you're
new to amqp.*
"AMQP defines four default exchange types (routing algorithms) that
covers most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message,
and the routing criteria used when a queue is bound to
this exchange.
* `topic`
Wildcard match between the routing key and the routing
pattern specified in the exchange/queue binding.
The routing key is treated as zero or more words delimited
by `"."` and supports special wildcard characters. `"*"`
matches a single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence
any message sent to this exchange will be forwarded to all
queues bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special
argument named "x-match" determines the matching algorithm,
where `"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
.. _`AMQP in 10 minutes: Part 4`:
https://bit.ly/2rcICv5
channel (ChannelT): The channel the exchange is bound to (if bound).
durable (bool): Durable exchanges remain active when a server restarts.
Non-durable exchanges (transient exchanges) are purged when a
server restarts. Default is :const:`True`.
auto_delete (bool): If set, the exchange is deleted when all queues
have finished using it. Default is :const:`False`.
delivery_mode (enum): The default delivery mode used for messages.
The value is an integer, or alias string.
* 1 or `"transient"`
The message is transient. Which means it is stored in
memory only, and is lost if the server dies or restarts.
* 2 or "persistent" (*default*)
The message is persistent. Which means the message is
stored both in-memory, and on disk, and therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
arguments (Dict): Additional arguments to specify when the exchange
is declared.
no_declare (bool): Never declare this exchange
(:meth:`declare` does nothing).
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ''
type = 'direct'
durable = True
auto_delete = False
passive = False
delivery_mode = None
no_declare = False
attrs = (
('name', None),
('type', None),
('arguments', None),
('durable', bool),
('passive', bool),
('auto_delete', bool),
('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m),
('no_declare', bool),
)
def __init__(self, name='', type='', channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def __hash__(self):
return hash('E|%s' % (self.name,))
def _can_declare(self):
return not self.no_declare and (
self.name and not self.name.startswith(
INTERNAL_EXCHANGE_PREFIX))
def declare(self, nowait=False, passive=None, channel=None):
"""Declare the exchange.
Creates the exchange on the broker, unless passive is set
in which case it will only assert that the exchange exists.
Argument:
nowait (bool): If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
if self._can_declare():
passive = self.passive if passive is None else passive
return (channel or self.channel).exchange_declare(
exchange=self.name, type=self.type, durable=self.durable,
auto_delete=self.auto_delete, arguments=self.arguments,
nowait=nowait, passive=passive,
)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False, channel=None, **kwargs):
"""Bind the exchange to another exchange.
Arguments:
nowait (bool): If set the server will not respond, and the call
will not block waiting for a response.
Default is :const:`False`.
"""
if isinstance(exchange, Exchange):
exchange = exchange.name
return (channel or self.channel).exchange_bind(
destination=self.name,
source=exchange,
routing_key=routing_key,
nowait=nowait,
arguments=arguments,
)
def unbind_from(self, source='', routing_key='',
nowait=False, arguments=None, channel=None):
"""Delete previously created exchange binding from the server."""
if isinstance(source, Exchange):
source = source.name
return (channel or self.channel).exchange_unbind(
destination=self.name,
source=source,
routing_key=routing_key,
nowait=nowait,
arguments=arguments,
)
def Message(self, body, delivery_mode=None, properties=None, **kwargs):
"""Create message instance to be sent with :meth:`publish`.
Arguments:
body (Any): Message body.
delivery_mode (bool): Set custom delivery mode.
Defaults to :attr:`delivery_mode`.
priority (int): Message priority, 0 to broker configured
max priority, where higher is better.
content_type (str): The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
content_encoding (str): The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
properties (Dict): Message properties.
headers (Dict): Message headers.
"""
# XXX This method is unused by kombu itself AFAICT [ask].
properties = {} if properties is None else properties
properties['delivery_mode'] = maybe_delivery_mode(self.delivery_mode)
return self.channel.prepare_message(
body,
properties=properties,
**kwargs)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
Arguments:
message (Union[kombu.Message, str, bytes]):
Message to publish.
routing_key (str): Message routing key.
mandatory (bool): Currently not supported.
immediate (bool): Currently not supported.
"""
if isinstance(message, string_t):
message = self.Message(message)
exchange = exchange or self.name
return self.channel.basic_publish(
message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate,
)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
Arguments:
if_unused (bool): Delete only if the exchange has no bindings.
Default is :const:`False`.
nowait (bool): If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_delete(exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def binding(self, routing_key='', arguments=None, unbind_arguments=None):
return binding(self, routing_key, arguments, unbind_arguments)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self._repr_entity(self)
def __str__(self):
return 'Exchange {0}({1})'.format(
_reprstr(self.name) or repr(''), self.type,
)
@property
def can_cache_declaration(self):
return not self.auto_delete
@python_2_unicode_compatible
class binding(Object):
    """Represents a queue or exchange binding.

    Arguments:
        exchange (Exchange): Exchange to bind to.
        routing_key (str): Routing key used as binding key.
        arguments (Dict): Arguments for bind operation.
        unbind_arguments (Dict): Arguments for unbind operation.
    """

    # Field specs consumed by the Object base-class attribute machinery.
    attrs = (
        ('exchange', None),
        ('routing_key', None),
        ('arguments', None),
        ('unbind_arguments', None)
    )

    def __init__(self, exchange=None, routing_key='',
                 arguments=None, unbind_arguments=None):
        self.exchange = exchange
        self.routing_key = routing_key
        self.arguments = arguments
        self.unbind_arguments = unbind_arguments

    def declare(self, channel, nowait=False):
        """Declare destination exchange."""
        exchange = self.exchange
        # The unnamed default exchange never needs declaring.
        if exchange and exchange.name:
            exchange.declare(channel=channel, nowait=nowait)

    def bind(self, entity, nowait=False, channel=None):
        """Bind entity to this binding."""
        entity.bind_to(
            exchange=self.exchange, routing_key=self.routing_key,
            arguments=self.arguments, nowait=nowait, channel=channel)

    def unbind(self, entity, nowait=False, channel=None):
        """Unbind entity from this binding."""
        entity.unbind_from(
            self.exchange, routing_key=self.routing_key,
            arguments=self.unbind_arguments, nowait=nowait, channel=channel)

    def __repr__(self):
        return '<binding: {0}>'.format(self)

    def __str__(self):
        return '{0}->{1}'.format(
            _reprstr(self.exchange.name), _reprstr(self.routing_key))
@python_2_unicode_compatible
class Queue(MaybeChannelBound):
    """A Queue declaration.

    Arguments:
        name (str): See :attr:`name`.
        exchange (Exchange, str): See :attr:`exchange`.
        routing_key (str): See :attr:`routing_key`.
        channel (kombu.Connection, ChannelT): See :attr:`channel`.
        durable (bool): See :attr:`durable`.
        exclusive (bool): See :attr:`exclusive`.
        auto_delete (bool): See :attr:`auto_delete`.
        queue_arguments (Dict): See :attr:`queue_arguments`.
        binding_arguments (Dict): See :attr:`binding_arguments`.
        consumer_arguments (Dict): See :attr:`consumer_arguments`.
        no_declare (bool): See :attr:`no_declare`.
        on_declared (Callable): See :attr:`on_declared`.
        expires (float): See :attr:`expires`.
        message_ttl (float): See :attr:`message_ttl`.
        max_length (int): See :attr:`max_length`.
        max_length_bytes (int): See :attr:`max_length_bytes`.
        max_priority (int): See :attr:`max_priority`.

    Attributes:
        name (str): Name of the queue.
            Default is no name (default queue destination).
        exchange (Exchange): The :class:`Exchange` the queue binds to.
        routing_key (str): The routing key (if any), also called *binding key*.
            The interpretation of the routing key depends on
            the :attr:`Exchange.type`.
            * direct exchange
                Matches if the routing key property of the message and
                the :attr:`routing_key` attribute are identical.
            * fanout exchange
                Always matches, even if the binding does not have a key.
            * topic exchange
                Matches the routing key property of the message by a primitive
                pattern matching scheme. The message routing key then consists
                of words separated by dots (`"."`, like domain names), and
                two special characters are available; star (`"*"`) and hash
                (`"#"`). The star matches any word, and the hash matches
                zero or more words. For example `"*.stock.#"` matches the
                routing keys `"usd.stock"` and `"eur.stock.db"` but not
                `"stock.nasdaq"`.
        channel (ChannelT): The channel the Queue is bound to (if bound).
        durable (bool): Durable queues remain active when a server restarts.
            Non-durable queues (transient queues) are purged if/when
            a server restarts.
            Note that durable queues do not necessarily hold persistent
            messages, although it does not make sense to send
            persistent messages to a transient queue.
            Default is :const:`True`.
        exclusive (bool): Exclusive queues may only be consumed from by the
            current connection. Setting the 'exclusive' flag
            always implies 'auto-delete'.
            Default is :const:`False`.
        auto_delete (bool): If set, the queue is deleted when all consumers
            have finished using it. Last consumer can be canceled
            either explicitly or because its channel is closed. If
            there was no consumer ever on the queue, it won't be
            deleted.
        expires (float): Set the expiry time (in seconds) for when this
            queue should expire.
            The expiry time decides how long the queue can stay unused
            before it's automatically deleted.
            *Unused* means the queue has no consumers, the queue has not been
            redeclared, and ``Queue.get`` has not been invoked for a duration
            of at least the expiration period.
            See https://www.rabbitmq.com/ttl.html#queue-ttl
            **RabbitMQ extension**: Only available when using RabbitMQ.
        message_ttl (float): Message time to live in seconds.
            This setting controls how long messages can stay in the queue
            unconsumed. If the expiry time passes before a message consumer
            has received the message, the message is deleted and no consumer
            will see the message.
            See https://www.rabbitmq.com/ttl.html#per-queue-message-ttl
            **RabbitMQ extension**: Only available when using RabbitMQ.
        max_length (int): Set the maximum number of messages that the
            queue can hold.
            If the number of messages in the queue size exceeds this limit,
            new messages will be dropped (or dead-lettered if a dead letter
            exchange is active).
            See https://www.rabbitmq.com/maxlength.html
            **RabbitMQ extension**: Only available when using RabbitMQ.
        max_length_bytes (int): Set the max size (in bytes) for the total
            of messages in the queue.
            If the total size of all the messages in the queue exceeds this
            limit, new messages will be dropped (or dead-lettered if a dead
            letter exchange is active).
            **RabbitMQ extension**: Only available when using RabbitMQ.
        max_priority (int): Set the highest priority number for this queue.
            For example if the value is 10, then messages can delivered to
            this queue can have a ``priority`` value between 0 and 10,
            where 10 is the highest priority.
            RabbitMQ queues without a max priority set will ignore
            the priority field in the message, so if you want priorities
            you need to set the max priority field to declare the queue
            as a priority queue.
            **RabbitMQ extension**: Only available when using RabbitMQ.
        queue_arguments (Dict): Additional arguments used when declaring
            the queue. Can be used to to set the arguments value
            for RabbitMQ/AMQP's ``queue.declare``.
        binding_arguments (Dict): Additional arguments used when binding
            the queue. Can be used to to set the arguments value
            for RabbitMQ/AMQP's ``queue.declare``.
        consumer_arguments (Dict): Additional arguments used when consuming
            from this queue. Can be used to to set the arguments value
            for RabbitMQ/AMQP's ``basic.consume``.
        alias (str): Unused in Kombu, but applications can take advantage
            of this, for example to give alternate names to queues with
            automatically generated queue names.
        on_declared (Callable): Optional callback to be applied when the
            queue has been declared (the ``queue_declare`` operation is
            complete). This must be a function with a signature that
            accepts at least 3 positional arguments:
            ``(name, messages, consumers)``.
        no_declare (bool): Never declare this queue, nor related
            entities (:meth:`declare` does nothing).
    """

    ContentDisallowed = ContentDisallowed

    # Class-level defaults; instances override these via keyword arguments.
    name = ''
    exchange = Exchange('')
    routing_key = ''
    durable = True
    exclusive = False
    auto_delete = False
    no_ack = False

    # (attribute-name, coercion-callable) pairs for the base-class
    # attribute machinery; None means "use the value as given".
    attrs = (
        ('name', None),
        ('exchange', None),
        ('routing_key', None),
        ('queue_arguments', None),
        ('binding_arguments', None),
        ('consumer_arguments', None),
        ('durable', bool),
        ('exclusive', bool),
        ('auto_delete', bool),
        ('no_ack', None),
        ('alias', None),
        ('bindings', list),
        ('no_declare', bool),
        ('expires', float),
        ('message_ttl', float),
        ('max_length', int),
        ('max_length_bytes', int),
        ('max_priority', int)
    )

    def __init__(self, name='', exchange=None, routing_key='',
                 channel=None, bindings=None, on_declared=None,
                 **kwargs):
        super(Queue, self).__init__(**kwargs)
        self.name = name or self.name
        if isinstance(exchange, str):
            self.exchange = Exchange(exchange)
        elif isinstance(exchange, Exchange):
            self.exchange = exchange
        self.routing_key = routing_key or self.routing_key
        self.bindings = set(bindings or [])
        self.on_declared = on_declared
        # allows Queue('name', [binding(...), binding(...), ...])
        if isinstance(exchange, (list, tuple, set)):
            self.bindings |= set(exchange)
        if self.bindings:
            self.exchange = None
        # exclusive implies auto-delete.
        if self.exclusive:
            self.auto_delete = True
        self.maybe_bind(channel)

    def bind(self, channel):
        # Preserve the callback across the base-class rebinding copy.
        on_declared = self.on_declared
        bound = super(Queue, self).bind(channel)
        bound.on_declared = on_declared
        return bound

    def __hash__(self):
        # 'Q|' prefix keeps queues and exchanges of the same name distinct.
        return hash('Q|%s' % (self.name,))

    def when_bound(self):
        if self.exchange:
            self.exchange = self.exchange(self.channel)

    def declare(self, nowait=False, channel=None):
        """Declare queue and exchange then binds queue to exchange."""
        if not self.no_declare:
            # - declare main binding.
            self._create_exchange(nowait=nowait, channel=channel)
            self._create_queue(nowait=nowait, channel=channel)
            self._create_bindings(nowait=nowait, channel=channel)
        return self.name

    def _create_exchange(self, nowait=False, channel=None):
        if self.exchange:
            self.exchange.declare(nowait=nowait, channel=channel)

    def _create_queue(self, nowait=False, channel=None):
        self.queue_declare(nowait=nowait, passive=False, channel=channel)
        if self.exchange and self.exchange.name:
            self.queue_bind(nowait=nowait, channel=channel)

    def _create_bindings(self, nowait=False, channel=None):
        # Resolve the channel once instead of on every loop iteration.
        channel = channel or self.channel
        for B in self.bindings:
            B.declare(channel)
            B.bind(self, nowait=nowait, channel=channel)

    def queue_declare(self, nowait=False, passive=False, channel=None):
        """Declare queue on the server.

        Arguments:
            nowait (bool): Do not wait for a reply.
            passive (bool): If set, the server will not create the queue.
                The client can use this to check whether a queue exists
                without modifying the server state.
        """
        channel = channel or self.channel
        queue_arguments = channel.prepare_queue_arguments(
            self.queue_arguments or {},
            expires=self.expires,
            message_ttl=self.message_ttl,
            max_length=self.max_length,
            max_length_bytes=self.max_length_bytes,
            max_priority=self.max_priority,
        )
        ret = channel.queue_declare(
            queue=self.name,
            passive=passive,
            durable=self.durable,
            exclusive=self.exclusive,
            auto_delete=self.auto_delete,
            arguments=queue_arguments,
            nowait=nowait,
        )
        # Anonymous queues get their server-generated name from the reply.
        if not self.name:
            self.name = ret[0]
        if self.on_declared:
            self.on_declared(*ret)
        return ret

    def queue_bind(self, nowait=False, channel=None):
        """Create the queue binding on the server."""
        return self.bind_to(self.exchange, self.routing_key,
                            self.binding_arguments,
                            channel=channel, nowait=nowait)

    def bind_to(self, exchange='', routing_key='',
                arguments=None, nowait=False, channel=None):
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return (channel or self.channel).queue_bind(
            queue=self.name,
            exchange=exchange,
            routing_key=routing_key,
            arguments=arguments,
            nowait=nowait,
        )

    def get(self, no_ack=None, accept=None):
        """Poll the server for a new message.

        This method provides direct access to the messages in a
        queue using a synchronous dialogue, designed for
        specific types of applications where synchronous functionality
        is more important than performance.

        Returns:
            ~kombu.Message: if a message was available,
                or :const:`None` otherwise.

        Arguments:
            no_ack (bool): If enabled the broker will
                automatically ack messages.
            accept (Set[str]): Custom list of accepted content types.
        """
        no_ack = self.no_ack if no_ack is None else no_ack
        message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
        if message is not None:
            m2p = getattr(self.channel, 'message_to_python', None)
            if m2p:
                message = m2p(message)
            if message.errors:
                message._reraise_error()
            message.accept = prepare_accept_content(accept)
        return message

    def purge(self, nowait=False):
        """Remove all ready messages from the queue."""
        return self.channel.queue_purge(queue=self.name,
                                        nowait=nowait) or 0

    def consume(self, consumer_tag='', callback=None,
                no_ack=None, nowait=False):
        """Start a queue consumer.

        Consumers last as long as the channel they were created on, or
        until the client cancels them.

        Arguments:
            consumer_tag (str): Unique identifier for the consumer.
                The consumer tag is local to a connection, so two clients
                can use the same consumer tags. If this field is empty
                the server will generate a unique tag.
            no_ack (bool): If enabled the broker will automatically
                ack messages.
            nowait (bool): Do not wait for a reply.
            callback (Callable): callback called for each delivered message.
        """
        if no_ack is None:
            no_ack = self.no_ack
        return self.channel.basic_consume(
            queue=self.name,
            no_ack=no_ack,
            consumer_tag=consumer_tag or '',
            callback=callback,
            nowait=nowait,
            arguments=self.consumer_arguments)

    def cancel(self, consumer_tag):
        """Cancel a consumer by consumer tag."""
        return self.channel.basic_cancel(consumer_tag)

    def delete(self, if_unused=False, if_empty=False, nowait=False):
        """Delete the queue.

        Arguments:
            if_unused (bool): If set, the server will only delete the queue
                if it has no consumers. A channel error will be raised
                if the queue has consumers.
            if_empty (bool): If set, the server will only delete the queue if
                it is empty. If it is not empty a channel error will be raised.
            nowait (bool): Do not wait for a reply.
        """
        return self.channel.queue_delete(queue=self.name,
                                         if_unused=if_unused,
                                         if_empty=if_empty,
                                         nowait=nowait)

    def queue_unbind(self, arguments=None, nowait=False, channel=None):
        return self.unbind_from(self.exchange, self.routing_key,
                                arguments, nowait, channel)

    def unbind_from(self, exchange='', routing_key='',
                    arguments=None, nowait=False, channel=None):
        """Unbind queue by deleting the binding from the server."""
        # BUGFIX: accept a plain exchange name, matching bind_to();
        # previously ``exchange.name`` raised AttributeError whenever a
        # string (including the documented '' default) was passed.
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return (channel or self.channel).queue_unbind(
            queue=self.name,
            exchange=exchange,
            routing_key=routing_key,
            arguments=arguments,
            nowait=nowait,
        )

    def __eq__(self, other):
        if isinstance(other, Queue):
            return (self.name == other.name and
                    self.exchange == other.exchange and
                    self.routing_key == other.routing_key and
                    self.queue_arguments == other.queue_arguments and
                    self.binding_arguments == other.binding_arguments and
                    self.consumer_arguments == other.consumer_arguments and
                    self.durable == other.durable and
                    self.exclusive == other.exclusive and
                    self.auto_delete == other.auto_delete)
        return NotImplemented

    def __ne__(self, other):
        # BUGFIX: propagate NotImplemented instead of negating it
        # (``not NotImplemented`` warns on 3.9+ and raises on 3.12+).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        if self.bindings:
            return self._repr_entity('Queue {name} -> {bindings}'.format(
                name=_reprstr(self.name),
                bindings=pretty_bindings(self.bindings),
            ))
        return self._repr_entity(
            'Queue {name} -> {0.exchange!r} -> {routing_key}'.format(
                self, name=_reprstr(self.name),
                routing_key=_reprstr(self.routing_key),
            ),
        )

    @property
    def can_cache_declaration(self):
        # Expiring and auto-delete queues may vanish server-side, so a
        # cached "already declared" flag would be unsafe for them.
        if self.queue_arguments:
            expiring_queue = "x-expires" in self.queue_arguments
        else:
            expiring_queue = False
        return not expiring_queue and not self.auto_delete

    @classmethod
    def from_dict(cls, queue, **options):
        """Alternate constructor building a Queue from a flat options dict,
        where exchange_*/queue_* keys override the shared durable /
        auto_delete defaults."""
        binding_key = options.get('binding_key') or options.get('routing_key')
        e_durable = options.get('exchange_durable')
        if e_durable is None:
            e_durable = options.get('durable')
        e_auto_delete = options.get('exchange_auto_delete')
        if e_auto_delete is None:
            e_auto_delete = options.get('auto_delete')
        q_durable = options.get('queue_durable')
        if q_durable is None:
            q_durable = options.get('durable')
        q_auto_delete = options.get('queue_auto_delete')
        if q_auto_delete is None:
            q_auto_delete = options.get('auto_delete')
        e_arguments = options.get('exchange_arguments')
        q_arguments = options.get('queue_arguments')
        b_arguments = options.get('binding_arguments')
        c_arguments = options.get('consumer_arguments')
        bindings = options.get('bindings')
        exchange = Exchange(options.get('exchange'),
                            type=options.get('exchange_type'),
                            delivery_mode=options.get('delivery_mode'),
                            routing_key=options.get('routing_key'),
                            durable=e_durable,
                            auto_delete=e_auto_delete,
                            arguments=e_arguments)
        return Queue(queue,
                     exchange=exchange,
                     routing_key=binding_key,
                     durable=q_durable,
                     exclusive=options.get('exclusive'),
                     auto_delete=q_auto_delete,
                     no_ack=options.get('no_ack'),
                     queue_arguments=q_arguments,
                     binding_arguments=b_arguments,
                     consumer_arguments=c_arguments,
                     bindings=bindings)

    def as_dict(self, recurse=False):
        res = super(Queue, self).as_dict(recurse)
        if not recurse:
            return res
        bindings = res.get('bindings')
        if bindings:
            res['bindings'] = [b.as_dict(recurse=True) for b in bindings]
        return res
| |
from constant import *
from clientserver import *
from decimal import *
import re
# phase number (key) : [phase name, phase specific prompt message]
# used to generate phase specific messages
# NOTE(review): values are indexed elsewhere with PHASE_NAME_INDEX (the name)
# and, presumably, index 1 for the prompt — confirm against constant.py.
PHASE_DICTIONARY = {
    PHASE_NUM.KEYWORD_CAMPAIGN: ['keyword campaign', 'Please select the number corresponding ' +
                                 'to the desired keyword campaign being associated with your campaign.'],
    PHASE_NUM.NAME: ['name',
                     'Please input the name that will be associated with your dynamic ' +
                     'search ad campaign estimate.'],
    PHASE_NUM.START_DATE: ['start date',
                           'Please input the date that your campaign estimate starts. <b>Must ' +
                           'be in the form mm-dd-yyyy.</b>'],
    PHASE_NUM.END_DATE: ['end date',
                         'Please input the date that your campaign estimate will end. <b>Must ' +
                         'be in the form mm-dd-yyyy.</b>'],
    PHASE_NUM.DAILY_BUDGET: ['daily budget', 'Please input the amount you are willing ' +
                             'to spend on the campaign each day.'],
    PHASE_NUM.LOCATIONS: ['locations', 'Please enter a comma separated list of state ' +
                          'codes that your ad will target. Must be in the form of a valid US ' +
                          'State abbreviation (Ex: \"CA, TX, MN\") (Must follow this form!). ' +
                          'Enter \"USA\" if you would like to target the entire US.'],
    PHASE_NUM.NEG_LOCATIONS: ['negative locations', 'Are there any locations that your ' +
                              'ad will not target? Ex: CA, MN, PA (Must follow this format!' +
                              '). Input N/A if not applicable.'],
    PHASE_NUM.DOMAIN: ['domain', 'What is the domain of the webpage being advertised? ' +
                       '(Ex: https://www.google.com/)'],
    PHASE_NUM.TARGETS: ['targets', 'What are target pages of your domain ' +
                        'that you would like to specifically advertise? Multiple ' +
                        'target pages can be input as a comma separated list. ' +
                        '(Ex: https://www.google.com/page1/, https://www.google.com/page2/)'],
    PHASE_NUM.MANUAL_CPC: ['cost per click', 'What is the desired cost per click? ' +
                           'The amount specified here will be how much you pay everytime an ' +
                           'ad is clicked.'],
    PHASE_NUM.AD_TEXT: ['ad text', 'What text should appear on your ad.']
}
# [Error Handling]
# Minimum allowed monetary value (daily budget / manual CPC).
# Using Decimal('0.01') rather than the float 0.01 matters: the float is
# slightly greater than 1/100, so ``Decimal('0.01') >= 0.01`` was False
# and an input of exactly "0.01" was previously rejected.
_MIN_MONEY = Decimal('0.01')

# Days per month; February is adjusted for leap years where needed.
_DAYS_IN_MONTH = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                  7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}

# Exactly two letters (used with fullmatch, so "CAL" no longer passes).
_STATE_CODE_REGEX = re.compile(r'[A-Za-z][A-Za-z]')

# Regex string being used to validate URLs; compiled once at module load
# instead of on every call.
_URL_REGEX = re.compile(
    r'^(?:http|ftp)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)


def _validate_keyword_campaign_selection(message):
    """Validate a numeric selection into the global keyword-campaign list."""
    kc_campaigns = get_keyword_campaigns(None)
    try:
        selection = int(message)
    except ValueError:
        return 'Selection is not a numeric value! Please enter a valid number corresponding to the keyword campaign of interest.'
    if selection < 1 or selection > len(kc_campaigns):
        return 'Selection is out of bounds! Please enter a valid value.'
    return True


def _validate_name(event, message):
    """Validate a campaign name: non-empty and not already taken."""
    if not message:
        return 'Name is not valid!'
    user_key = get_campaign_key(event['user']['email'], message)
    if get_campaign_data(user_key) is None:
        return True
    return 'The campaign name, \"{}\", already exists!'.format(message)


def _validate_date(message):
    """Validate an mm-dd-yyyy date string; True or an error string."""
    parts = message.split('-')
    if len(parts) != 3:
        return 'Date is not in the right format! Must be in the form mm-dd-yyyy!'
    try:
        month, day, year = (int(part) for part in parts)
    except ValueError:
        # Non-numeric components previously escaped as an uncaught
        # ValueError from int(); report them as a format error instead.
        return 'Date is not in the right format! Must be in the form mm-dd-yyyy!'
    if month < JANUARY or month > DECEMBER:
        return 'Month must be between 1 and 12!'
    # The old check allowed day 31 only in even-numbered months, which is
    # calendar-wrong (31-day months are 1, 3, 5, 7, 8, 10, 12); use a real
    # days-per-month table with a leap-year adjustment for February.
    max_day = _DAYS_IN_MONTH[month]
    if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        max_day = 29
    if day < 1 or day > max_day:
        return 'Day must be between 1 and 31! If you entered 31 as a day, make sure the month has 31 days!'
    if year < 2000 or year > 2099:
        return 'Year can only be between 2000 and 2099!'
    return True


def _validate_money(message, attribute_name):
    """Validate a currency amount of at least $0.01; True or error string."""
    try:
        amount = Decimal(message.strip())
    except InvalidOperation:
        # Narrowed from a bare ``except:`` — only parse failures should
        # produce the "valid decimal" message.
        return '{} must be a valid decimal value! Ex: 34.00'.format(attribute_name)
    # Reject NaN/Infinity, which Decimal() parses successfully.
    if amount.is_finite() and amount >= _MIN_MONEY:
        return True
    return '{} must be at least $0.01!'.format(attribute_name)


def _validate_locations(event, message, phase_num):
    """Validate a comma separated list of US state codes (or "USA")."""
    if phase_num == PHASE_NUM.NEG_LOCATIONS and message.lower() == 'n/a':
        return True
    locations = [location.strip() for location in message.split(',')]
    if phase_num == PHASE_NUM.NEG_LOCATIONS:
        # Hoisted out of the per-location loop: the targeted locations only
        # need to be fetched once. Also fixed: the original called
        # map(str.strip, ...) without consuming the lazy iterator, so the
        # stored locations were never actually stripped.
        campaign_name = get_user_data(get_user_key(event['user']['email'])).campaign_name
        campaign_key = get_campaign_key(event['user']['email'], campaign_name)
        targeted = [loc.strip()
                    for loc in get_campaign_data(campaign_key).locations.split(',')]
    else:
        targeted = []
    for location in locations:
        if phase_num == PHASE_NUM.NEG_LOCATIONS:
            if location in targeted:
                return '{} is already a targetted location, it cannot be excluded.'.format(location)
            elif location == 'USA':
                return 'You cannot add USA as a negative location!'
        # user entered USA as location, other locations are ignored
        if location == 'USA' and phase_num != PHASE_NUM.NEG_LOCATIONS:
            return True
        if not _STATE_CODE_REGEX.fullmatch(location):
            attribute = 'Locations' if phase_num == PHASE_NUM.LOCATIONS else 'Negative locations'
            return attribute + ' must be a two letter code! Ex: CA, TX, MN, PA'
    return True


def _validate_urls(message, phase_num):
    """Validate a URL (DOMAIN) or comma separated URL list (TARGETS)."""
    attribute_name = 'domain' if phase_num == PHASE_NUM.DOMAIN else 'target pages'
    failed = 'URL is not valid, please ensure that you specified the correct {}!'.format(attribute_name)
    if phase_num == PHASE_NUM.TARGETS:
        for url in (part.strip() for part in message.split(',')):
            if not _URL_REGEX.match(url):
                return failed
        return True
    return True if _URL_REGEX.match(message) else failed


def error_handler(event, phase_num):
    """Returns a True if event is valid or
    string specifying specific error if
    event is invalid.

    Args:
        event: the dictionary event being checked
        phase_num: the phase number used to check validity of message

    Returns:
        True
            if no error is found
        Str
            specifes the specific error if an error is raised
    """
    message = event['message']['text']
    if phase_num == PHASE_NUM.VIEWING_KEYWORD_CAMPAIGNS:
        campaigns = get_keyword_campaigns(event['user']['email'])
        if not message.isdigit():
            return error_message('Selection is not a valid number! Please input a number indicating what campaign you would like to view.', INVALID_INPUT)
        elif int(message) < 1 or int(message) > len(campaigns):
            return error_message('Selection is out of bounds!', INVALID_INPUT)
        return True
    # phase 0: keyword campaign selection
    if phase_num == PHASE_NUM.KEYWORD_CAMPAIGN:
        return _validate_keyword_campaign_selection(message)
    # phase: name
    elif phase_num == PHASE_NUM.NAME:
        return _validate_name(event, message)
    # phase 1: start date, phase 2: end date
    elif phase_num in (PHASE_NUM.START_DATE, PHASE_NUM.END_DATE):
        return _validate_date(message)
    # phase 3: daily budget, phase 8: manual CPC
    elif phase_num in (PHASE_NUM.DAILY_BUDGET, PHASE_NUM.MANUAL_CPC):
        attribute_name = 'Daily budget' if phase_num == PHASE_NUM.DAILY_BUDGET else 'Manual CPC (Cost Per Click)'
        return _validate_money(message, attribute_name)
    # phase 4: locations, phase 5: negative locations
    elif phase_num in (PHASE_NUM.LOCATIONS, PHASE_NUM.NEG_LOCATIONS):
        return _validate_locations(event, message, phase_num)
    # phase 6: domain, phase 7: target page(s)
    elif phase_num in (PHASE_NUM.DOMAIN, PHASE_NUM.TARGETS):
        return _validate_urls(message, phase_num)
    # phase: ad text
    elif phase_num == PHASE_NUM.AD_TEXT:
        return True if message else 'Ad text is not valid! Cannot be an empty value!'
def create_keyword_campaign_list(user_id, editing, selecting_keyword_campaign):
    """Build the card listing keyword campaigns the user can pick from.

    Args:
        user_id:
            user whose campaigns are listed when selecting; ignored otherwise
        editing:
            when True the card is sent as a new message instead of an update
        selecting_keyword_campaign:
            whether the listing is scoped to user_id

    Returns:
        dict
            chat card response containing the numbered campaign list
    """
    owner = user_id if selecting_keyword_campaign else None
    keyword_json = get_keyword_campaigns(owner)
    entries = ''.join(
        '<b>{}.</b> {}<br>'.format(index + 1, campaign['name'])
        for index, campaign in enumerate(keyword_json))
    if keyword_json:
        message = "Please send a number corresponding to the desired keyword campaign.<br>{}".format(entries)
    else:
        message = "There are no active keyword campaigns, please create a campaign when a valid keyword campaign exists."
    quit_button = {
        "textButton": {
            "text": "QUIT",
            "onClick": {
                "action": {
                    "actionMethodName": "quit_campaign",
                }
            }
        }
    }
    return {
        "actionResponse": {
            "type": "UPDATE_MESSAGE" if not editing else "NEW_MESSAGE"
        },
        "cards": [
            {
                "header": build_header('Editing'),
                "sections": [
                    {"widgets": [{"textParagraph": {"text": message}}]},
                    {"widgets": [{"buttons": [quit_button]}]}
                ]
            }
        ]
    }
def create_campaign_list(user_id, keyword_campaign_id, editing):
    """Shows list of campaigns belonging to current user

    Args:
        user_id:
            user id being used to get campaign list
        keyword_campaign_id:
            keyword campaign id used to get campaign list
        editing:
            determines if message is used for editing or for submitting

    Returns:
        dict
            dictionary contains start campaign config message
    """
    campaigns = get_dsa_campaigns(user_id, keyword_campaign_id)
    if not campaigns:
        return error_message('There are no DSA Campaigns associated with this keyword campaign.', INVALID_INPUT)
    campaign_list = ''.join(
        '<b>{}.</b> {}<br>'.format(index + 1, campaign.name)
        for index, campaign in enumerate(campaigns))
    back_button = {
        "textButton": {
            "text": "BACK",
            "onClick": {
                "action": {
                    "actionMethodName": "back_submission" if editing else "quit_campaign"
                }
            }
        }
    }
    prompt = "Please send a number corresponding to the campaign you would like to view.<br>" + campaign_list
    return {
        "actionResponse": {
            "type": "NEW_MESSAGE" if not editing else "UPDATE_MESSAGE"
        },
        "cards": [
            {
                "header": build_header('Viewing'),
                "sections": [
                    {"widgets": [{"textParagraph": {"text": prompt}}]},
                    {"widgets": [{"buttons": [back_button]}]}
                ]
            }
        ]
    }
def create_setting_list():
    """Formats the introduction message response

    Args:
        None

    Returns:
        dict
            dictionary contains start campaign config message
    """
    campaign_list = ''.join(
        '<b>{}.</b> {}<br>'.format(
            number + 1,
            PHASE_DICTIONARY.get(number)[PHASE_NAME_INDEX].capitalize())
        for number in range(PHASE_NUM.SUBMISSION))
    quit_button = {
        "textButton": {
            "text": "QUIT",
            "onClick": {
                "action": {
                    "actionMethodName": "quit_campaign",
                }
            }
        }
    }
    back_button = {
        "textButton": {
            "text": "BACK",
            "onClick": {
                "action": {
                    "actionMethodName": "back_submission",
                }
            }
        }
    }
    prompt = ("Please send a number corresponding to the setting " +
              "you would like to edit.<br>" + campaign_list)
    return {
        "actionResponse": {
            "type": "UPDATE_MESSAGE"
        },
        "cards": [
            {
                "header": build_header('Editing'),
                "sections": [
                    {"widgets": [{"textParagraph": {"text": prompt}}]},
                    {"widgets": [{"buttons": [quit_button, back_button]}]}
                ]
            }
        ]
    }
# [Message Generating Functions]
def error_message(error_msg, phase_num):
    """Formats an error message response for invalid input

    Args:
        error_msg:
            the specifc error message being displayed
        phase_num:
            the phase number that raised the error

    Returns:
        dict
            dictionary contains error response message
    """
    if phase_num != PHASE_NUM.INACTIVE:
        error = "<font color=\"#ff0000\">ERROR</font>: {}<br><b>Please send a valid value for your campaign {}!</b>".format(
            error_msg, PHASE_DICTIONARY.get(phase_num)[PHASE_NAME_INDEX])
    else:
        error = "<font color=\"#ff0000\">ERROR</font>: <b>{}</b>".format(error_msg)
    quit_button = {
        "textButton": {
            "text": "QUIT",
            "onClick": {
                "action": {
                    "actionMethodName": "quit_campaign"
                }
            }
        }
    }
    return {
        "cards": [
            {
                "header": build_header('Alert'),
                "sections": [
                    {"widgets": [{"textParagraph": {"text": error}}]},
                    {"widgets": [{"buttons": [quit_button]}]}
                ]
            }
        ]
    }
def create_campaign_overview(campaign_data, submission):
    """Returns a campaign overview card for users to review while editing.

    Buttons on the message vary based on the submission boolean
    (see add_overview_buttons).

    Args:
        campaign_data:
            CampaignData used to populate campaign data
        submission:
            boolean representing if overview is for submission
    Returns:
        dict
            dictionary containing the overview message
    """
    not_set = 'None'
    cd = campaign_data
    # (topLabel, content, icon) for every keyValue row; unset string
    # fields show 'None', unset money fields (0.0) likewise
    rows = [
        ("Keyword Campaign ID", cd.keyword_campaign_id if cd.keyword_campaign_id != '' else not_set, "STAR"),
        ("Campaign Name", cd.name if cd.name != '' else not_set, "STAR"),
        ("Start Date", cd.start_date if cd.start_date != '' else not_set, "INVITE"),
        ("End Date", cd.end_date if cd.end_date != '' else not_set, "INVITE"),
        ("Daily Budget", '${}'.format(cd.daily_budget) if cd.daily_budget != 0.0 else not_set, "DOLLAR"),
        ("Cost Per Click", '${}'.format(cd.manual_CPC) if cd.manual_CPC != 0.0 else not_set, "DOLLAR"),
        ("Domain", cd.domain if cd.domain != '' else not_set, "BOOKMARK"),
        ("Target Pages", cd.targets if cd.targets != '' else not_set, "BOOKMARK"),
        ("Locations", cd.locations if cd.locations != '' else not_set, "MAP_PIN"),
        ("Negative Locations", cd.neg_locations if cd.neg_locations != '' else not_set, "MAP_PIN"),
        ("Ad Text", cd.ad_text if cd.ad_text != '' else not_set, "DESCRIPTION"),
    ]
    detail_widgets = [
        {"keyValue": {"topLabel": label, "content": content, "icon": icon}}
        for label, content, icon in rows
    ]
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE" if submission else "NEW_MESSAGE"},
        "cards": [{
            "header": build_header('Editing'),
            "sections": [
                {"widgets": detail_widgets},
                {"widgets": [{"buttons": add_overview_buttons(campaign_data, submission)}]}
            ]
        }]
    }
def add_overview_buttons(campaign_data, submission):
    """Returns the button list for a campaign overview card.

    For a submission overview: SUBMIT, QUIT, EDIT SETTINGS.
    Otherwise: EDIT (carrying the campaign name as a parameter) and BACK.

    Args:
        campaign_data:
            CampaignData used to populate button values
        submission:
            boolean representing if overview is for submission
    Returns:
        list
            list of dictionaries containing buttons
    """
    def action_button(text, method, parameters=None):
        # small factory for a textButton that triggers a bot action
        action = {"actionMethodName": method}
        if parameters is not None:
            action["parameters"] = parameters
        return {"textButton": {"text": text, "onClick": {"action": action}}}

    if submission:
        return [
            action_button("SUBMIT", "submit"),
            action_button("QUIT", "quit_campaign"),
            action_button("EDIT SETTINGS", "edit_campaign_settings"),
        ]
    return [
        action_button("EDIT", "confirm_edit",
                      [{"key": "campaign_name", "value": campaign_data.name}]),
        action_button("BACK", "back_action"),
    ]
def create_confirmation_message(message, phase_num, editing):
    """Builds a yes/no confirmation card for a valid input value.

    Args:
        message:
            the value that passed error checking and is ready to be
            user confirmed
        phase_num:
            the phase number the value belongs to
        editing:
            editing flag passed through to the yes/no action parameters
    Returns:
        dict
            dictionary containing the confirmation prompt
    """
    message_value = message
    if phase_num == PHASE_NUM.KEYWORD_CAMPAIGN:
        # keyword campaigns are picked by list position: display the
        # campaign's name but store the zero-based index as the value
        index = int(message) - 1
        message_value = get_keyword_campaigns(None)[index]['name']
        message = index

    def choice_button(label, method, params):
        return {"textButton": {"text": label, "onClick": {"action": {
            "actionMethodName": method,
            "parameters": params
        }}}}

    text = "You picked <b>\"{}\"</b> for your campaign {}, is this correct?".format(
        message_value, PHASE_DICTIONARY.get(phase_num)[PHASE_NAME_INDEX])
    no_params = [
        {"key": "phase_num", "value": phase_num},
        {"key": "editing", "value": editing},
    ]
    yes_params = [
        {"key": "input_data", "value": message},
        {"key": "editing", "value": editing},
    ]
    return {
        "cards": [{
            "header": build_header('Editing'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": text}}]},
                {"widgets": [{"buttons": [
                    choice_button("NO", "no_action", no_params),
                    choice_button("YES", "yes_action", yes_params),
                ]}]}
            ]
        }]
    }
def start_campaign_edit(event):
    """Builds the campaign-selection card for editing.

    Args:
        event: A dictionary with the event data (user email is read
            from event['user']['email'])
    Returns:
        dict
            card message listing the user's campaigns by number
    """
    listing = ''
    entities = get_user_campaigns(event['user']['email'])
    for position, entity in enumerate(entities, start=1):
        campaign = convert_entity_to_campaign(entities[position - 1])
        listing += '<b>{}.</b> {}<br>'.format(position, campaign.name)
    prompt = ("Please send a number corresponding to the campaign " +
              "you would like to edit.<br>" + listing)
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE"},
        "cards": [{
            "header": build_header('Editing'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": prompt}}]},
                {"widgets": [{"buttons": [{
                    "textButton": {
                        "text": "QUIT",
                        "onClick": {"action": {"actionMethodName": "quit_campaign"}}
                    }
                }]}]}
            ]
        }]
    }
def start_user_campaign(event):
    """Builds the campaign-configuration introduction card.

    Args:
        event: A dictionary with the event data (unused here)
    Returns:
        dict
            card message explaining the configuration flow, with a
            CONTINUE button
    """
    intro = ("You will now start configuring your "
             "DSA Campaign! If you would like to "
             "save and quit, you may click <b>\"QUIT\" "
             "</b>below at any point in the "
             "configuration process. Additionally, "
             "the process of entering data is simple. "
             "<br><b>1.</b> I will prompt you for a specific "
             "parameter - you must enter in a valid "
             "entry before moving on. <br><b>2.</b> Next, I "
             "will ask for your confirmation. "
             "<br><b>3.</b> You will be able to review and edit your "
             "settings before final submission. "
             "<br>To begin, please click <b>CONTINUE</b>.")
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE"},
        "cards": [{
            "header": build_header('Editing'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": intro}}]},
                {"widgets": [{"buttons": [{
                    "textButton": {
                        "text": "CONTINUE",
                        "onClick": {"action": {"actionMethodName": "continue_campaign"}}
                    }
                }]}]}
            ]
        }]
    }
def create_submission_message(event):
    """Builds the post-submission congratulation card.

    Args:
        event: A dictionary with the event data (display name is read
            from event['user']['displayName'])
    Returns:
        dict
            card message with QUIT and VISIT WEBSITE buttons
    """
    greeting = "Congratulations on submitting your campaign estimate, {}! ".format(
        event['user']['displayName'])
    text = (greeting +
            "Your Dynamic Search Ad Campaign " +
            "estimate will be available shortly!")
    quit_button = {
        "textButton": {
            "text": "QUIT",
            "onClick": {"action": {"actionMethodName": "quit_campaign"}}
        }
    }
    website_button = {
        "textButton": {
            "text": "VISIT WEBSITE",
            "onClick": {
                "openLink": {
                    "url": "https://dsa-uplift-estimation-2020.uc.r.appspot.com/"
                }
            }
        }
    }
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE"},
        "cards": [{
            "header": build_header('Editing'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": text}}]},
                {"widgets": [{"buttons": [quit_button, website_button]}]}
            ]
        }]
    }
def create_configure_message(user_id, phase_num, editing):
    """Builds the input-prompt card for the given configuration phase.

    Args:
        user_id:
            id of the user being prompted (used for the keyword
            campaign list in that phase)
        phase_num:
            the phase whose prompt message should be shown
        editing:
            boolean for whether the user is editing a submission
    Returns:
        dict
            card message prompting for the phase's value
    """
    if phase_num == PHASE_NUM.KEYWORD_CAMPAIGN:
        # keyword campaign selection uses its own list-style prompt
        return create_keyword_campaign_list(user_id, editing, False)
    prompt = '{}'.format(PHASE_DICTIONARY.get(phase_num)[PROMPT_MSG_INDEX])
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE" if not editing else "NEW_MESSAGE"},
        "cards": [{
            "header": build_header('Editing'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": prompt}}]},
                {"widgets": [{"buttons": add_configure_buttons(editing)}]}
            ]
        }]
    }
def add_configure_buttons(editing):
    """Returns the buttons for a configuration prompt card.

    Always includes QUIT; adds a BACK button when the user is editing
    an existing submission.

    Args:
        editing: boolean for if user is editing
    Returns:
        list
            buttons used by create_configure_message
    """
    def nav_button(label, method):
        return {"textButton": {"text": label, "onClick": {
            "action": {"actionMethodName": method}}}}

    buttons = [nav_button("QUIT", "quit_campaign")]
    if editing:
        buttons.append(nav_button("BACK", "back_submission"))
    return buttons
def create_join_message(event):
    """Builds the greeting card shown when the bot is added to a room.

    Args:
        event: A dictionary with the event data.
    Returns:
        dict
            dictionary containing the join response message
    """
    welcome = ("Thanks for adding the Dynamic Search Ads Configuration Bot, {}! "
               "To begin, please choose what you would like to do below.").format(
        event['user']['displayName'])
    return {
        "cards": [{
            "header": build_header('Standby'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": welcome}}]},
                {"widgets": [{"buttons": add_edit_button(event['user']['email'])}]}
            ]
        }]
    }
def create_home_message(event):
    """Builds the home card shown when configuration is cancelled.

    Args:
        event: A dictionary with the event data.
    Returns:
        dict
            dictionary containing the home response message
    """
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE"},
        "cards": [{
            "header": build_header('Standby'),
            "sections": [
                {"widgets": [{"textParagraph": {
                    "text": "Please choose what you would like to do below."
                }}]},
                {"widgets": [{"buttons": add_edit_button(event['user']['email'])}]}
            ]
        }]
    }
def add_edit_button(user_id):
    """Builds the home-card button list for a user.

    Always offers START NEW CAMPAIGN; adds EDIT EXISTING CAMPAIGN when
    the user has saved campaigns and VIEW CAMPAIGNS when keyword
    campaigns exist.

    Args:
        user_id:
            user_id being used to request home page
    Returns:
        list
            list of dictionaries containing buttons
    """
    def menu_button(label, method):
        return {"textButton": {"text": label, "onClick": {
            "action": {"actionMethodName": method}}}}

    buttons = [menu_button("START NEW CAMPAIGN", "start_campaign")]
    if len(get_user_campaigns(user_id)) != 0:
        buttons.append(menu_button("EDIT EXISTING CAMPAIGN", "edit_campaign"))
    if len(get_keyword_campaigns(user_id)) != 0:
        buttons.append(menu_button("VIEW CAMPAIGNS", "view_campaigns"))
    return buttons
def get_campaign_overview(campaign_data, viewing):
    """Builds a read-only overview card for an existing campaign.

    Shows all configured settings, a statistics section (impressions,
    clicks, cost), and BACK / DELETE CAMPAIGN buttons.

    Args:
        campaign_data:
            CampaignData used to populate campaign data
        viewing:
            bool representing if user is viewing a campaign from the
            campaign list prompt (determines if this updates the
            previous message)
    Returns:
        dict
            dictionary containing the overview message
    """
    not_set = 'None'
    cd = campaign_data

    def kv_widgets(rows):
        # (topLabel, content, icon) triples -> keyValue widgets
        return [{"keyValue": {"topLabel": t, "content": c, "icon": i}}
                for t, c, i in rows]

    detail_rows = [
        ("Keyword Campaign ID", cd.keyword_campaign_id if cd.keyword_campaign_id != '' else not_set, "STAR"),
        ("Campaign Name", cd.name if cd.name != '' else not_set, "STAR"),
        ("Start Date", cd.start_date if cd.start_date != '' else not_set, "INVITE"),
        ("End Date", cd.end_date if cd.end_date != '' else not_set, "INVITE"),
        ("Daily Budget", '${}'.format(cd.daily_budget) if cd.daily_budget != 0.0 else not_set, "DOLLAR"),
        ("Cost Per Click", '${}'.format(cd.manual_CPC) if cd.manual_CPC != 0.0 else not_set, "DOLLAR"),
        ("Domain", cd.domain if cd.domain != '' else not_set, "BOOKMARK"),
        ("Target Pages", cd.targets if cd.targets != '' else not_set, "BOOKMARK"),
        ("Locations", cd.locations if cd.locations != '' else not_set, "MAP_PIN"),
        ("Negative Locations", cd.neg_locations if cd.neg_locations != '' else not_set, "MAP_PIN"),
        ("Ad Text", cd.ad_text if cd.ad_text != '' else not_set, "DESCRIPTION"),
    ]
    stat_rows = [
        ("Impressions", cd.impressions if cd.impressions != '' else not_set, "PERSON"),
        ("Clicks", cd.clicks if cd.clicks != '' else not_set, "CONFIRMATION_NUMBER_ICON"),
        ("Cost", '${}'.format(cd.cost) if cd.cost != '' else not_set, "DOLLAR"),
    ]
    back_button = {
        "textButton": {
            "text": "BACK",
            "onClick": {"action": {"actionMethodName": "quit_campaign"}}
        }
    }
    delete_button = {
        "textButton": {
            "text": "DELETE CAMPAIGN",
            "onClick": {"action": {
                "actionMethodName": "delete_campaign",
                "parameters": [
                    {"key": "campaign_id", "value": cd.campaign_id},
                    {"key": "keyword_campaign_id", "value": cd.keyword_campaign_id},
                ]
            }}
        }
    }
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE" if not viewing else "NEW_MESSAGE"},
        "cards": [{
            "header": build_header(cd.status.capitalize()),
            "sections": [
                {"widgets": kv_widgets(detail_rows)},
                {"widgets": kv_widgets(stat_rows)},
                {"widgets": [{"buttons": [back_button, delete_button]}]}
            ]
        }]
    }
def create_campaign_deletion_confirmation(error):
    """Builds the alert card shown after a campaign delete attempt.

    Args:
        error: truthy when the deletion failed
    Returns:
        dict
            alert card reporting success or failure, with a QUIT button
    """
    if error:
        message = 'Campaign cannot be deleted right now, please try again.'
    else:
        message = 'Campaign successfully deleted.'
    return {
        "actionResponse": {"type": "UPDATE_MESSAGE"},
        "cards": [{
            "header": build_header('Alert'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": message}}]},
                {"widgets": [{"buttons": [{
                    "textButton": {
                        "text": "QUIT",
                        "onClick": {"action": {"actionMethodName": "quit_campaign"}}
                    }
                }]}]}
            ]
        }]
    }
def build_header(subtitle):
    """Returns the shared card header used by every bot message.

    Args:
        subtitle: user-defined subtitle appearing on the message
    Returns:
        dict
            header information for a card message
    """
    return {
        'title': 'Dynamic Search Ads Configuration Bot',
        'subtitle': subtitle,
        'imageStyle': "AVATAR",
        'imageUrl': "https://9to5google.com/2018/05/05/exclusive-new-google-app-icons-i-o-2018-gallery/ads_512dp/",
    }
def confirm_campaign_delete(user_id, campaign_id, keyword_campaign_id):
    """Builds a yes/no warning card before deleting a DSA campaign.

    Args:
        user_id: id of the user requesting the deletion
        campaign_id: id of the DSA campaign to delete
        keyword_campaign_id: id of the keyword campaign it belongs to
    Returns:
        dict
            warning card asking the user to confirm the deletion
    """
    campaigns = get_dsa_campaigns(user_id, keyword_campaign_id)
    # Find the campaign being deleted. Fall back to a generic label if the
    # id is no longer present instead of raising AttributeError on `.name`
    # (the original left campaign_to_delete as None in that case).
    campaign_to_delete = next(
        (campaign for campaign in campaigns if campaign.campaign_id == campaign_id),
        None)
    campaign_name = campaign_to_delete.name if campaign_to_delete is not None else 'this campaign'
    warning = ("<font color=\"#ff0000\">ALERT:</font> Are you sure you would like "
               "to delete the campaign, <b>{}</b>?").format(campaign_name)
    # Both buttons carry the campaign id so the yes/no handlers know which
    # campaign the answer refers to.
    id_params = [{"key": "campaign_id", "value": campaign_id}]
    return {
        "cards": [{
            "header": build_header('WARNING'),
            "sections": [
                {"widgets": [{"textParagraph": {"text": warning}}]},
                {"widgets": [{"buttons": [
                    {"textButton": {"text": "NO", "onClick": {"action": {
                        "actionMethodName": "no_action",
                        "parameters": id_params
                    }}}},
                    {"textButton": {"text": "YES", "onClick": {"action": {
                        "actionMethodName": "yes_action",
                        "parameters": id_params
                    }}}},
                ]}]}
            ]
        }]
    }
| |
# -*- coding: utf-8 -*-
import httplib as http
import importlib
import pkgutil
import pytest
from pytz import utc
from datetime import datetime
import urllib
from nose.tools import * # noqa:
import re
from tests.base import ApiTestCase, DbTestCase
from osf_tests import factories
from tests.utils import make_drf_request_with_version
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer, BaseAPISerializer
from api.base import serializers as base_serializers
from api.nodes.serializers import NodeSerializer, RelationshipField
from api.waffle.serializers import WaffleSerializer, BaseWaffleSerializer
from api.registrations.serializers import RegistrationSerializer
# Collect every app-level serializers module under api/ (skipping the base
# and test packages); apps without an importable serializers module are
# silently ignored.
SER_MODULES = []
for loader, name, _ in pkgutil.iter_modules(['api']):
    if name != 'base' and name != 'test':
        try:
            SER_MODULES.append(
                importlib.import_module(
                    'api.{}.serializers'.format(name)
                )
            )
        except ImportError:
            pass
# Gather every concrete BaseAPISerializer subclass defined in those modules.
# Base/JSONAPI framework classes are excluded by name; issubclass raises
# TypeError for module attributes that are not classes, which we skip.
SER_CLASSES = []
for mod in SER_MODULES:
    for name, val in mod.__dict__.items():
        try:
            if issubclass(val, BaseAPISerializer):
                if 'JSONAPI' in name or 'BaseAPI' in name:
                    continue
                SER_CLASSES.append(val)
        except TypeError:
            pass
class FakeModel(object):
    """Minimal stand-in model for exercising serializer link fields."""

    # attribute values looked up by FakeSerializer's relationship fields
    null = None
    foo = 'bar'
    pk = '1234'

    def null_field(self):
        # link callable that resolves to no value
        return None

    def valued_field(self):
        # link callable that resolves to a concrete value
        return 'Some'
class FakeSerializer(base_serializers.JSONAPISerializer):
    # Serializer fixture with one null and one valued entry per link kind,
    # used by TestNullLinks to verify null links/relationships are omitted.
    class Meta:
        type_ = 'foos'
    # link names resolve against the serialized object / module callables
    links = base_serializers.LinksField({
        'null_field': 'null_field',
        'valued_field': 'valued_field',
    })
    # '<null>' resolves to FakeModel.null (None) -> relationship omitted
    null_link_field = base_serializers.RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<null>'},
    )
    # '<foo>' resolves to FakeModel.foo ('bar') -> relationship serialized
    valued_link_field = base_serializers.RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<foo>'},
    )
def null_field(*args, **kwargs):
    """Link resolver fixture that always yields no value."""
    return None
def valued_field(*args, **kwargs):
    """Link resolver fixture that always yields a fixed URL."""
    return 'http://foo.com'
class TestSerializerMetaType(ApiTestCase):
    """Checks every collected serializer declares a JSON API type."""

    def test_expected_serializers_have_meta_types(self):
        for ser in SER_CLASSES:
            # every serializer needs a Meta with either a static type_
            # attribute or a get_type() hook
            assert hasattr(ser, 'Meta'), 'Serializer {} has no Meta'.format(ser)
            has_static_type = hasattr(ser.Meta, 'type_')
            has_type_hook = hasattr(ser.Meta, 'get_type')
            assert has_static_type or has_type_hook, \
                'Serializer {} has no Meta.type_ or Meta.get_type()'.format(ser)
class TestNodeSerializerAndRegistrationSerializerDifferences(ApiTestCase):
    """
    All fields on the Node Serializer other than the few we can serialize for withdrawals must be redeclared on the
    Registration Serializer and wrapped in HideIfWithdrawal
    HideIfRegistration fields should not be serialized on registrations.
    """
    def setUp(self):
        # a public project plus a public registration of it, and the API
        # detail URLs for both
        super(TestNodeSerializerAndRegistrationSerializerDifferences, self).setUp()
        self.node = factories.ProjectFactory(is_public=True)
        self.registration = factories.RegistrationFactory(
            project=self.node, is_public=True)
        self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
        self.reg_url = '/{}registrations/{}/'.format(
            API_BASE, self.registration._id)
    def test_registration_serializer(self):
        # fields that are visible for withdrawals
        visible_on_withdrawals = [
            'contributors',
            'bibliographic_contributors',
            'implicit_contributors',
            'date_created',
            'date_modified',
            'description',
            'id',
            'links',
            'registration',
            'article_doi',
            'title',
            'type',
            'category',
            'root',
            'parent',
            'affiliated_institutions',
            'identifiers',
            'current_user_can_comment',
            'current_user_is_contributor',
            'preprint',
            'subjects',
            'wiki_enabled']
        # fields that do not appear on registrations
        non_registration_fields = ['registrations', 'draft_registrations', 'templated_by_count', 'settings', 'children']
        # every Node field must be redeclared on RegistrationSerializer;
        # those not exempted above must be wrapped in one of the
        # conditional-visibility field wrappers
        for field in NodeSerializer._declared_fields:
            assert_in(field, RegistrationSerializer._declared_fields)
            reg_field = RegistrationSerializer._declared_fields[field]
            if field not in visible_on_withdrawals and field not in non_registration_fields:
                assert_true(
                    isinstance(reg_field, base_serializers.HideIfWithdrawal) or
                    isinstance(reg_field, base_serializers.ShowIfVersion) or
                    isinstance(reg_field, base_serializers.ShowIfAdminScopeOrAnonymous)
                )
    def test_hide_if_registration_fields(self):
        # HideIfRegistration relationships appear on the node payload but
        # must be absent from the registration payload
        node_res = self.app.get(self.url)
        node_relationships = node_res.json['data']['relationships']
        registration_res = self.app.get(self.reg_url)
        registration_relationships = registration_res.json['data']['relationships']
        hide_if_registration_fields = [
            field for field in NodeSerializer._declared_fields if isinstance(
                NodeSerializer._declared_fields[field],
                base_serializers.HideIfRegistration)]
        for field in hide_if_registration_fields:
            assert_in(field, node_relationships)
            assert_not_in(field, registration_relationships)
class TestNullLinks(ApiTestCase):
    def test_null_links_are_omitted(self):
        # serialize the FakeModel fixture: links/relationships that resolve
        # to None must be dropped from the payload, valued ones kept
        req = make_drf_request_with_version(version='2.0')
        rep = FakeSerializer(FakeModel, context={'request': req}).data['data']
        assert_not_in('null_field', rep['links'])
        assert_in('valued_field', rep['links'])
        assert_not_in('null_link_field', rep['relationships'])
class TestApiBaseSerializers(ApiTestCase):
    """Tests for base serializer behavior: get_absolute_url overrides,
    the related_counts query param, and embed validation."""

    def setUp(self):
        # public project with five public children and one linked node
        super(TestApiBaseSerializers, self).setUp()
        self.user = factories.AuthUserFactory()
        self.auth = factories.Auth(self.user)
        self.node = factories.ProjectFactory(is_public=True)
        for i in range(5):
            factories.ProjectFactory(is_public=True, parent=self.node)
        self.linked_node = factories.NodeFactory(
            creator=self.user, is_public=True)
        self.node.add_pointer(self.linked_node, auth=self.auth)
        self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)

    def test_serializers_have_get_absolute_url_method(self):
        # every non-test serializer must override the base get_absolute_url
        serializers = JSONAPISerializer.__subclasses__()
        base_get_absolute_url = JSONAPISerializer.get_absolute_url
        for serializer in serializers:
            # Waffle endpoints are nonstandard
            if serializer == WaffleSerializer or serializer == BaseWaffleSerializer:
                continue
            if not re.match('^(api_test|test).*', serializer.__module__):
                assert hasattr(
                    serializer, 'get_absolute_url'
                ), 'No get_absolute_url method'
                assert_not_equal(
                    serializer.get_absolute_url,
                    base_get_absolute_url
                )

    def test_counts_not_included_in_link_fields_by_default(self):
        res = self.app.get(self.url)
        relationships = res.json['data']['relationships']
        for relation in relationships.values():
            if relation == {'data': None}:
                continue
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    link_meta = link.get('meta', {})
                    assert_not_in('count', link_meta)
            else:
                link = relation['links'].values()[0]
                link_meta = link.get('meta', {})
                assert_not_in('count', link_meta)

    def test_counts_included_in_link_fields_with_related_counts_query_param(
            self):
        res = self.app.get(self.url, params={'related_counts': True})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.items():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            # unwrap conditional-visibility wrappers to reach the real field
            if getattr(field, 'field', None):
                field = field.field
            related_meta = getattr(field, 'related_meta', {})
            if related_meta and related_meta.get('count', False):
                link = relation['links'].values()[0]
                assert_in('count', link['meta'], field)

    def test_related_counts_excluded_query_param_false(self):
        res = self.app.get(self.url, params={'related_counts': False})
        relationships = res.json['data']['relationships']
        for relation in relationships.values():
            if relation == {'data': None}:
                continue
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    link_meta = link.get('meta', {})
                    assert_not_in('count', link_meta)
            else:
                link = relation['links'].values()[0]
                link_meta = link.get('meta', {})
                assert_not_in('count', link_meta)

    def test_invalid_related_counts_value_raises_bad_request(self):
        res = self.app.get(
            self.url,
            params={'related_counts': 'fish'},
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_invalid_embed_value_raise_bad_request(self):
        res = self.app.get(
            self.url,
            params={'embed': 'foo'},
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(
            res.json['errors'][0]['detail'],
            'The following fields are not embeddable: foo'
        )

    def test_embed_does_not_remove_relationship(self):
        res = self.app.get(self.url, params={'embed': 'root'})
        assert_equal(res.status_code, 200)
        assert_in(
            self.url,
            res.json['data']['relationships']['root']['links']['related']['href']
        )

    def test_counts_included_in_children_field_with_children_related_counts_query_param(
            self):
        res = self.app.get(self.url, params={'related_counts': 'children'})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.items():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            if getattr(field, 'field', None):
                field = field.field
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    related_meta = getattr(field, 'related_meta', {})
                    if related_meta and related_meta.get('count', False) and key == 'children':
                        assert_in('count', link['meta'])
                    else:
                        assert_not_in('count', link.get('meta', {}))
            elif relation != {'data': None}:
                link = relation['links'].values()[0]
                related_meta = getattr(field, 'related_meta', {})
                if related_meta and related_meta.get('count', False) and key == 'children':
                    assert_in('count', link['meta'])
                else:
                    assert_not_in('count', link.get('meta', {}))

    def test_counts_included_in_children_and_contributors_fields_with_field_csv_related_counts_query_param(
            self):
        res = self.app.get(
            self.url,
            params={'related_counts': 'children,contributors'}
        )
        relationships = res.json['data']['relationships']
        for key, relation in relationships.items():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            if getattr(field, 'field', None):
                field = field.field
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    related_meta = getattr(field, 'related_meta', {})
                    # parentheses are required: 'and' binds tighter than
                    # 'or', so without them any 'contributors' relation was
                    # asserted to have a count even when related_meta did
                    # not request one
                    if related_meta and related_meta.get('count', False) and (key == 'children' or key == 'contributors'):
                        assert_in('count', link['meta'])
                    else:
                        assert_not_in('count', link.get('meta', {}))
            elif relation != {'data': None}:
                link = relation['links'].values()[0]
                related_meta = getattr(field, 'related_meta', {})
                # same precedence fix as above
                if related_meta and related_meta.get('count', False) and (key == 'children' or key == 'contributors'):
                    assert_in('count', link['meta'])
                else:
                    assert_not_in('count', link.get('meta', {}))

    def test_error_when_requesting_related_counts_for_attribute_field(self):
        res = self.app.get(
            self.url,
            params={'related_counts': 'title'},
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(
            res.json['errors'][0]['detail'],
            "Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got 'title'"
        )
@pytest.mark.django_db
class TestRelationshipField:
    # We need a Serializer to test the Relationship field (needs context)
    class BasicNodeSerializer(JSONAPISerializer):
        # plain related-only relationship
        parent = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'}
        )
        # relationship whose related link carries meta resolved via the
        # serializer's get_count / get_extra methods
        parent_with_meta = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'},
            related_meta={'count': 'get_count', 'extra': 'get_extra'},
        )
        # relationship exposing both a self link and a related link
        self_and_related_field = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'},
            self_view='nodes:node-contributors',
            self_view_kwargs={'node_id': '<_id>'},
        )
        two_url_kwargs = RelationshipField(
            # fake url, for testing purposes
            related_view='nodes:node-pointer-detail',
            related_view_kwargs={'node_id': '<_id>', 'node_link_id': '<_id>'},
        )
        # If related_view_kwargs is a callable, this field _must_ match the property name on
        # the target record
        registered_from = RelationshipField(
            related_view=lambda n: 'registrations:registration-detail' if n and n.is_registration else 'nodes:node-detail',
            related_view_kwargs=lambda n: {'node_id': '<registered_from._id>'})
        # relationship whose related link is rendered with filter query params
        field_with_filters = base_serializers.RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'},
            filter={'target': 'hello', 'woop': 'yea'}
        )
        class Meta:
            type_ = 'nodes'
        def get_count(self, obj):
            return 1
        def get_extra(self, obj):
            return 'foo'
    # TODO: Expand tests
    # Regression test for https://openscience.atlassian.net/browse/OSF-4832
    def test_serializing_meta(self):
        # only non-count meta entries appear without ?related_counts
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        meta = data['relationships']['parent_with_meta']['links']['related']['meta']
        assert_not_in('count', meta)
        assert_in('extra', meta)
        assert_equal(meta['extra'], 'foo')
    def test_serializing_empty_to_one(self):
        req = make_drf_request_with_version(version='2.2')
        node = factories.NodeFactory()
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        # This node is not registered_from another node hence it is an empty-to-one.
        assert 'registered_from' not in data['relationships']
        # In 2.9, API returns null for empty relationships
        # https://openscience.atlassian.net/browse/PLAT-840
        req = make_drf_request_with_version(version='2.9')
        node = factories.NodeFactory()
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        assert data['relationships']['registered_from']['data'] is None
    def test_self_and_related_fields(self):
        # both the self and related hrefs must be rendered
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        relationship_field = data['relationships']['self_and_related_field']['links']
        assert_in(
            '/v2/nodes/{}/contributors/'.format(node._id),
            relationship_field['self']['href']
        )
        assert_in(
            '/v2/nodes/{}/'.format(node._id),
            relationship_field['related']['href']
        )
    def test_field_with_two_kwargs(self):
        # both URL kwargs resolve from the same node id here
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        field = data['relationships']['two_url_kwargs']['links']
        assert_in(
            '/v2/nodes/{}/node_links/{}/'.format(node._id, node._id),
            field['related']['href']
        )
    def test_field_with_two_filters(self):
        # both configured filters must appear in the related href
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        field = data['relationships']['field_with_filters']['links']
        assert_in(
            urllib.quote('filter[target]=hello', safe='?='),
            field['related']['href']
        )
        assert_in(
            urllib.quote('filter[woop]=yea', safe='?='),
            field['related']['href']
        )
    def test_field_with_callable_related_attrs(self):
        # omitted for a plain node (no registered_from), rendered for a
        # registration pointing back at the node
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        assert_not_in('registered_from', data['relationships'])
        registration = factories.RegistrationFactory(project=node)
        data = self.BasicNodeSerializer(
            registration, context={
                'request': req}
        ).data['data']
        field = data['relationships']['registered_from']['links']
        assert_in('/v2/nodes/{}/'.format(node._id), field['related']['href'])
class TestShowIfVersion(ApiTestCase):
    """Checks version-gated fields: node_links appears for API version 2.0
    but not 2.1, and never on withdrawn registrations."""
    def setUp(self):
        super(TestShowIfVersion, self).setUp()
        self.node = factories.NodeFactory()
        self.registration = factories.RegistrationFactory()
    def test_node_links_allowed_version_node_serializer(self):
        req = make_drf_request_with_version(version='2.0')
        data = NodeSerializer(self.node, context={'request': req}).data['data']
        assert_in('node_links', data['relationships'])
    def test_node_links_bad_version_node_serializer(self):
        req = make_drf_request_with_version(version='2.1')
        data = NodeSerializer(self.node, context={'request': req}).data['data']
        assert_not_in('node_links', data['relationships'])
    def test_node_links_allowed_version_registration_serializer(self):
        req = make_drf_request_with_version(version='2.0')
        data = RegistrationSerializer(
            self.registration,
            context={'request': req}
        ).data['data']
        assert_in('node_links', data['relationships'])
    def test_node_links_bad_version_registration_serializer(self):
        req = make_drf_request_with_version(version='2.1')
        data = RegistrationSerializer(
            self.registration,
            context={'request': req}
        ).data['data']
        assert_not_in('node_links', data['relationships'])
    def test_node_links_withdrawn_registration(self):
        # withdrawal hides node_links regardless of requested version
        factories.WithdrawnRegistrationFactory(
            registration=self.registration)
        req = make_drf_request_with_version(version='2.0')
        data = RegistrationSerializer(
            self.registration,
            context={'request': req}
        ).data['data']
        assert_not_in('node_links', data['relationships'])
        req = make_drf_request_with_version(version='2.1')
        data = RegistrationSerializer(
            self.registration,
            context={'request': req}
        ).data['data']
        assert_not_in('node_links', data['relationships'])
class VersionedDateTimeField(DbTestCase):
    """Datetime attributes render in the format matching the request version."""

    def setUp(self):
        super(VersionedDateTimeField, self).setUp()
        self.node = factories.NodeFactory()
        self.old_date = datetime.utcnow()  # naive dates before django-osf
        self.old_date_without_microseconds = self.old_date.replace(
            microsecond=0)
        self.new_date = datetime.utcnow().replace(
            tzinfo=utc)  # non-naive after django-osf
        self.new_date_without_microseconds = self.new_date.replace(
            microsecond=0)
        self.old_format = '%Y-%m-%dT%H:%M:%S.%f'
        self.old_format_without_microseconds = '%Y-%m-%dT%H:%M:%S'
        self.new_format = '%Y-%m-%dT%H:%M:%S.%fZ'

    def _rendered_date_modified(self, version, value):
        """Serialize self.node with last_logged=value; return date_modified."""
        request = make_drf_request_with_version(version=version)
        setattr(self.node, 'last_logged', value)
        data = NodeSerializer(
            self.node, context={'request': request}).data['data']
        return data['attributes']['date_modified']

    def test_old_date_formats_to_old_format(self):
        assert_equal(
            datetime.strftime(self.old_date, self.old_format),
            self._rendered_date_modified('2.0', self.old_date))

    def test_old_date_without_microseconds_formats_to_old_format(self):
        assert_equal(
            datetime.strftime(
                self.old_date_without_microseconds,
                self.old_format_without_microseconds),
            self._rendered_date_modified(
                '2.0', self.old_date_without_microseconds))

    def test_old_date_formats_to_new_format(self):
        assert_equal(
            datetime.strftime(self.old_date, self.new_format),
            self._rendered_date_modified('2.2', self.old_date))

    def test_old_date_without_microseconds_formats_to_new_format(self):
        assert_equal(
            datetime.strftime(
                self.old_date_without_microseconds,
                self.new_format),
            self._rendered_date_modified(
                '2.2', self.old_date_without_microseconds))

    def test_new_date_formats_to_old_format(self):
        assert_equal(
            datetime.strftime(self.new_date, self.old_format),
            self._rendered_date_modified('2.0', self.new_date))

    def test_new_date_without_microseconds_formats_to_old_format(self):
        assert_equal(
            datetime.strftime(
                self.new_date_without_microseconds,
                self.old_format_without_microseconds),
            self._rendered_date_modified(
                '2.0', self.new_date_without_microseconds))

    def test_new_date_formats_to_new_format(self):
        assert_equal(
            datetime.strftime(self.new_date, self.new_format),
            self._rendered_date_modified('2.2', self.new_date))

    def test_new_date_without_microseconds_formats_to_new_format(self):
        assert_equal(
            datetime.strftime(
                self.new_date_without_microseconds,
                self.new_format),
            self._rendered_date_modified(
                '2.2', self.new_date_without_microseconds))

    # regression test for https://openscience.atlassian.net/browse/PLAT-1350
    # VersionedDateTimeField was treating version 2.10 and higher as decimals,
    # less than 2.2
    def test_old_date_formats_to_new_format_with_2_10(self):
        assert_equal(
            datetime.strftime(self.old_date, self.new_format),
            self._rendered_date_modified('2.10', self.old_date))
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for consistency group code.
"""
import json
from xml.dom import minidom
import mock
import webob
import cinder.consistencygroup
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import utils
from cinder.volume import api as volume_api
class ConsistencyGroupsAPITestCase(test.TestCase):
"""Test Case for consistency groups API."""
def setUp(self):
super(ConsistencyGroupsAPITestCase, self).setUp()
self.cg_api = cinder.consistencygroup.API()
self.ctxt = context.RequestContext('fake', 'fake', auth_token=True,
is_admin=True)
def _create_consistencygroup(
self,
ctxt=None,
name='test_consistencygroup',
description='this is a test consistency group',
volume_type_id='123456',
availability_zone='az1',
host='fakehost',
status='creating'):
"""Create a consistency group object."""
ctxt = ctxt or self.ctxt
consistencygroup = objects.ConsistencyGroup(ctxt)
consistencygroup.user_id = 'fake'
consistencygroup.project_id = 'fake'
consistencygroup.availability_zone = availability_zone
consistencygroup.name = name
consistencygroup.description = description
consistencygroup.volume_type_id = volume_type_id
consistencygroup.host = host
consistencygroup.status = status
consistencygroup.create()
return consistencygroup
def test_show_consistencygroup(self):
consistencygroup = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
consistencygroup.id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('az1',
res_dict['consistencygroup']['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroup']['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroup']['name'])
self.assertEqual('creating',
res_dict['consistencygroup']['status'])
self.assertEqual(['123456'],
res_dict['consistencygroup']['volume_types'])
consistencygroup.destroy()
def test_show_consistencygroup_xml_content_type(self):
consistencygroup = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
consistencygroup.id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
dom = minidom.parseString(res.body)
consistencygroups = dom.getElementsByTagName('consistencygroup')
name = consistencygroups.item(0).getAttribute('name')
self.assertEqual("test_consistencygroup", name.strip())
consistencygroup.destroy()
def test_show_consistencygroup_with_consistencygroup_NotFound(self):
req = webob.Request.blank('/v2/fake/consistencygroups/9999')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertEqual('ConsistencyGroup 9999 could not be found.',
res_dict['itemNotFound']['message'])
def test_show_consistencygroup_with_null_volume_type(self):
consistencygroup = self._create_consistencygroup(volume_type_id=None)
req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
consistencygroup.id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('az1',
res_dict['consistencygroup']['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroup']['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroup']['name'])
self.assertEqual('creating',
res_dict['consistencygroup']['status'])
self.assertEqual([], res_dict['consistencygroup']['volume_types'])
consistencygroup.destroy()
def test_list_consistencygroups_json(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual(consistencygroup1.id,
res_dict['consistencygroups'][0]['id'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][0]['name'])
self.assertEqual(consistencygroup2.id,
res_dict['consistencygroups'][1]['id'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][1]['name'])
self.assertEqual(consistencygroup3.id,
res_dict['consistencygroups'][2]['id'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][2]['name'])
consistencygroup1.destroy()
consistencygroup2.destroy()
consistencygroup3.destroy()
def test_list_consistencygroups_xml(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
dom = minidom.parseString(res.body)
consistencygroup_list = dom.getElementsByTagName('consistencygroup')
self.assertEqual(consistencygroup1.id,
consistencygroup_list.item(0).getAttribute('id'))
self.assertEqual(consistencygroup2.id,
consistencygroup_list.item(1).getAttribute('id'))
self.assertEqual(consistencygroup3.id,
consistencygroup_list.item(2).getAttribute('id'))
consistencygroup3.destroy()
consistencygroup2.destroy()
consistencygroup1.destroy()
def test_list_consistencygroups_detail_json(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup(volume_type_id=(
'uuid1,uuid2'))
req = webob.Request.blank('/v2/fake/consistencygroups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('az1',
res_dict['consistencygroups'][0]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][0]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][0]['name'])
self.assertEqual(consistencygroup1.id,
res_dict['consistencygroups'][0]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][0]['status'])
self.assertEqual(['123456'],
res_dict['consistencygroups'][0]['volume_types'])
self.assertEqual('az1',
res_dict['consistencygroups'][1]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][1]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][1]['name'])
self.assertEqual(consistencygroup2.id,
res_dict['consistencygroups'][1]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][1]['status'])
self.assertEqual(['123456'],
res_dict['consistencygroups'][1]['volume_types'])
self.assertEqual('az1',
res_dict['consistencygroups'][2]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][2]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][2]['name'])
self.assertEqual(consistencygroup3.id,
res_dict['consistencygroups'][2]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][2]['status'])
self.assertEqual(['uuid1', 'uuid2'],
res_dict['consistencygroups'][2]['volume_types'])
consistencygroup1.destroy()
consistencygroup2.destroy()
consistencygroup3.destroy()
def test_list_consistencygroups_detail_xml(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
dom = minidom.parseString(res.body)
consistencygroup_detail = dom.getElementsByTagName('consistencygroup')
self.assertEqual(
'az1',
consistencygroup_detail.item(0).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(0).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(0).getAttribute('name'))
self.assertEqual(
consistencygroup1.id,
consistencygroup_detail.item(0).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(0).getAttribute('status'))
self.assertEqual(
'az1',
consistencygroup_detail.item(1).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(1).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(1).getAttribute('name'))
self.assertEqual(
consistencygroup2.id,
consistencygroup_detail.item(1).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(1).getAttribute('status'))
self.assertEqual(
'az1',
consistencygroup_detail.item(2).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(2).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(2).getAttribute('name'))
self.assertEqual(
consistencygroup3.id,
consistencygroup_detail.item(2).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(2).getAttribute('status'))
consistencygroup3.destroy()
consistencygroup2.destroy()
consistencygroup1.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_json(self, mock_validate):
group_id = "1"
# Create volume type
vol_type = 'test'
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
body = {"consistencygroup": {"name": "cg1",
"volume_types": vol_type,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertTrue(mock_validate.called)
group_id = res_dict['consistencygroup']['id']
cg = objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
group_id)
cg.destroy()
def test_create_consistencygroup_with_no_body(self):
# omit body from the request
req = webob.Request.blank('/v2/fake/consistencygroups')
req.body = json.dumps(None)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual("Missing required element 'consistencygroup' in "
"request body.",
res_dict['badRequest']['message'])
def test_delete_consistencygroup_available(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": True}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
consistencygroup = objects.ConsistencyGroup.get_by_id(
self.ctxt, consistencygroup.id)
self.assertEqual(202, res.status_int)
self.assertEqual('deleting', consistencygroup.status)
consistencygroup.destroy()
def test_delete_consistencygroup_with_consistencygroup_NotFound(self):
req = webob.Request.blank('/v2/fake/consistencygroups/9999/delete')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(None)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertEqual('ConsistencyGroup 9999 could not be found.',
res_dict['itemNotFound']['message'])
def test_delete_consistencygroup_with_Invalidconsistencygroup(self):
consistencygroup = self._create_consistencygroup(status='invalid')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": False}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_('Invalid ConsistencyGroup: Consistency group status must be '
'available or error, but current status is: invalid'))
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_delete_consistencygroup_no_host(self):
consistencygroup = self._create_consistencygroup(
host=None,
status='error')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": True}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(202, res.status_int)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
consistencygroup.id)
self.assertEqual('deleted', cg.status)
self.assertIsNone(cg.host)
def test_create_delete_consistencygroup_update_quota(self):
name = 'mycg'
description = 'consistency group 1'
fake_type = {'id': '1', 'name': 'fake_type'}
self.stubs.Set(db, 'volume_types_get_by_name_or_id',
mock.Mock(return_value=[fake_type]))
self.stubs.Set(self.cg_api,
'_cast_create_consistencygroup',
mock.Mock())
self.stubs.Set(self.cg_api, 'update_quota',
mock.Mock())
cg = self.cg_api.create(self.ctxt, name, description,
fake_type['name'])
self.cg_api.update_quota.assert_called_once_with(
self.ctxt, cg, 1)
self.assertEqual('creating', cg.status)
self.assertIsNone(cg.host)
self.cg_api.update_quota.reset_mock()
cg.status = 'error'
self.cg_api.delete(self.ctxt, cg)
self.cg_api.update_quota.assert_called_once_with(
self.ctxt, cg, -1, self.ctxt.project_id)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
cg.id)
self.assertEqual('deleted', cg.status)
def test_delete_consistencygroup_with_invalid_body(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"invalid_request_element": {"force": False}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_delete_consistencygroup_with_invalid_force_value_in_body(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": "abcd"}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_delete_consistencygroup_with_empty_force_value_in_body(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": ""}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_create_consistencygroup_failed_no_volume_type(self):
name = 'cg1'
body = {"consistencygroup": {"name": name,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_('volume_types must be provided to create '
'consistency group %s.') % name)
self.assertEqual(msg, res_dict['badRequest']['message'])
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_update_consistencygroup_success(self, mock_validate):
volume_type_id = '123456'
consistencygroup = self._create_consistencygroup(status='available',
host='test_host')
remove_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id,
consistencygroup_id=consistencygroup.id)['id']
remove_volume_id2 = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id,
consistencygroup_id=consistencygroup.id)['id']
self.assertEqual('available', consistencygroup.status)
cg_volumes = db.volume_get_all_by_group(self.ctxt.elevated(),
consistencygroup.id)
cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes]
self.assertIn(remove_volume_id, cg_vol_ids)
self.assertIn(remove_volume_id2, cg_vol_ids)
add_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id)['id']
add_volume_id2 = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id)['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
name = 'newcg'
description = 'New Consistency Group Description'
add_volumes = add_volume_id + "," + add_volume_id2
remove_volumes = remove_volume_id + "," + remove_volume_id2
body = {"consistencygroup": {"name": name,
"description": description,
"add_volumes": add_volumes,
"remove_volumes": remove_volumes, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
consistencygroup = objects.ConsistencyGroup.get_by_id(
self.ctxt, consistencygroup.id)
self.assertEqual(202, res.status_int)
self.assertTrue(mock_validate.called)
self.assertEqual('updating', consistencygroup.status)
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_not_found(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": None,
"description": None,
"add_volumes": "fake-volume-uuid",
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot add volume fake-volume-uuid "
"to consistency group %(group_id)s because volume cannot "
"be found.") %
{'group_id': consistencygroup.id})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_remove_volume_not_found(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": None,
"description": "new description",
"add_volumes": None,
"remove_volumes": "fake-volume-uuid", }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot remove volume fake-volume-uuid "
"from consistency group %(group_id)s because it is not "
"in the group.") %
{'group_id': consistencygroup.id})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_empty_parameters(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": "",
"description": "",
"add_volumes": None,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_invalid_state(self):
volume_type_id = '123456'
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
add_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id,
status='wrong_status')['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
add_volumes = add_volume_id
body = {"consistencygroup": {"name": "cg1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
"to consistency group %(group_id)s because volume is in an "
"invalid state: %(status)s. Valid states are: ('available', "
"'in-use').") %
{'volume_id': add_volume_id,
'group_id': consistencygroup.id,
'status': 'wrong_status'})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_invalid_volume_type(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
wrong_type = 'wrong-volume-type-id'
add_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=wrong_type)['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
add_volumes = add_volume_id
body = {"consistencygroup": {"name": "cg1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
"to consistency group %(group_id)s because volume type "
"%(volume_type)s is not supported by the group.") %
{'volume_id': add_volume_id,
'group_id': consistencygroup.id,
'volume_type': wrong_type})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_already_in_cg(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
add_volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id='some_other_cg')['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
add_volumes = add_volume_id
body = {"consistencygroup": {"name": "cg1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_invalid_state(self):
wrong_status = 'wrong_status'
consistencygroup = self._create_consistencygroup(status=wrong_status,
ctxt=self.ctxt)
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": "new name",
"description": None,
"add_volumes": None,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = _("Invalid ConsistencyGroup: Consistency group status must be "
"available, but current status is: %s.") % wrong_status
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_from_src(self, mock_validate):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot = utils.create_cgsnapshot(
self.ctxt, consistencygroup_id=consistencygroup.id)
snapshot = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot.id,
status='available')
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
self.assertTrue(mock_validate.called)
cg_ref = objects.ConsistencyGroup.get_by_id(
self.ctxt.elevated(), res_dict['consistencygroup']['id'])
cg_ref.destroy()
snapshot.destroy()
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
cgsnapshot.destroy()
def test_create_consistencygroup_from_src_cg(self):
self.mock_object(volume_api.API, "create", stubs.stub_volume_create)
source_cg = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=source_cg.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
cg = objects.ConsistencyGroup.get_by_id(
self.ctxt, res_dict['consistencygroup']['id'])
cg.destroy
db.volume_destroy(self.ctxt.elevated(), volume_id)
source_cg.destroy()
def test_create_consistencygroup_from_src_both_snap_cg(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot_id = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
snapshot = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot_id,
status='available')
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot_id,
"source_cgid":
consistencygroup.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
snapshot.destroy()
db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_invalid_body(self):
name = 'cg1'
body = {"invalid": {"name": name,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
# Missing 'consistencygroup-from-src' in the body.
self.assertIsNotNone(res_dict['badRequest']['message'])
def test_create_consistencygroup_from_src_no_source_id(self):
name = 'cg1'
body = {"consistencygroup-from-src": {"name": name,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
def test_create_consistencygroup_from_src_no_host(self):
consistencygroup = utils.create_consistencygroup(self.ctxt, host=None)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot = utils.create_cgsnapshot(
self.ctxt, consistencygroup_id=consistencygroup.id)
snapshot = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot.id,
status='available')
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = _('Invalid ConsistencyGroup: No host to create consistency '
'group')
self.assertIn(msg, res_dict['badRequest']['message'])
snapshot.destroy()
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
cgsnapshot.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_empty(self):
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
cgsnapshot.destroy()
def test_create_consistencygroup_from_src_source_cg_empty(self):
source_cg = utils.create_consistencygroup(self.ctxt)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
source_cg.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_notfound(self):
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": "fake_cgsnap"}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertIsNotNone(res_dict['itemNotFound']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_source_cg_notfound(self):
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": "fake_source_cg"}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertIsNotNone(res_dict['itemNotFound']['message'])
@mock.patch.object(volume_api.API, 'create',
side_effect=exception.CinderException(
'Create volume failed.'))
def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed(
self, mock_create):
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot = utils.create_cgsnapshot(
self.ctxt, consistencygroup_id=consistencygroup.id)
snapshot = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot.id,
status='available')
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = _("Create volume failed.")
self.assertEqual(msg, res_dict['badRequest']['message'])
snapshot.destroy()
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
cgsnapshot.destroy()
@mock.patch.object(volume_api.API, 'create',
side_effect=exception.CinderException(
'Create volume failed.'))
def test_create_consistencygroup_from_src_cg_create_volume_failed(
self, mock_create):
source_cg = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=source_cg.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
source_cg.destroy()
| |
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Test class for cloudbyte's cinder driver.
This involves mocking of elasticenter's json responses
when a method of this driver is unit tested.
"""
import json
import mock
import testtools
from testtools import matchers
from cinder import exception
from cinder.volume import configuration as conf
from cinder.volume.drivers.cloudbyte import cloudbyte
# A fake list account response of cloudbyte's elasticenter
FAKE_LIST_ACCOUNT_RESPONSE = """{ "listAccountResponse" : {
"count":1 ,
"account" : [{
"id": "d13a4e9e-0c05-4d2d-8a5e-5efd3ef058e0",
"name": "CustomerA",
"simpleid": 1,
"description": "None",
"iqnname": "iqn.2014-05.cvsacc1",
"availIOPS": 508,
"totaliops": 2000,
"usedIOPS": 1492,
"volumes": [],
"storageBuckets": [],
"tsms": [],
"qosgroups": [],
"filesystemslist": [],
"currentUsedSpace": 53179,
"currentAvailableSpace": 1249349,
"currentThroughput": 156,
"currentIOPS": 33,
"currentLatency": 0,
"currentThrottle": 0,
"numericquota": 3145728.0,
"currentnumericquota": 1253376.0,
"currentavailablequota": 1892352.0,
"revisionnumber": 1
}]
}}"""
# A fake list tsm response of cloudbyte's elasticenter
FAKE_LIST_TSM_RESPONSE = """{ "listTsmResponse" : {
"count":1 ,
"listTsm" : [{
"id": "955eaf34-4221-3a77-82d0-99113b126fa8",
"simpleid": 2,
"name": "openstack",
"ipaddress": "172.16.50.40",
"accountname": "CustomerA",
"sitename": "BLR",
"clustername": "HAGrp1",
"controllerName": "Controller",
"controlleripaddress": "172.16.50.6",
"clusterstatus": "Online",
"hapoolstatus": "ONLINE",
"hapoolname": "pool",
"hapoolavailiops": 1700,
"hapoolgrace": true,
"hapoolavailtput": 6800,
"poollatency": 10,
"accountid": "d13a4e9e-0c05-4d2d-8a5e-5efd3ef058e0",
"controllerid": "8c2f7084-99c0-36e6-9cb7-205e3ba4c813",
"poolid": "adcbef8f-2193-3f2c-9bb1-fcaf977ae0fc",
"datasetid": "87a23025-f2b2-39e9-85ac-9cda15bfed1a",
"storageBuckets": [],
"currentUsedSpace": 16384,
"currentAvailableSpace": 188416,
"currentTotalSpace": 204800,
"currentThroughput": 12,
"tpcontrol": "true",
"currentIOPS": 0,
"iopscontrol": "true",
"gracecontrol": "false",
"currentLatency": 0,
"currentThrottle": 0,
"iops": "1000",
"availIOPS": "500",
"availThroughput": "2000",
"usedIOPS": "500",
"usedThroughput": "2000",
"throughput": "4000",
"latency": "15",
"graceallowed": true,
"numericquota": 1048576.0,
"currentnumericquota": 204800.0,
"availablequota": 843776.0,
"blocksize": "4",
"type": "1",
"iqnname": "iqn.2014-05.cvsacc1.openstack",
"interfaceName": "em0",
"revisionnumber": 0,
"status": "Online",
"subnet": "16",
"managedstate": "Available",
"configurationstate": "sync",
"offlinenodes": "",
"pooltakeover": "noTakeOver",
"totalprovisionquota": "536576",
"haNodeStatus": "Available",
"ispooltakeoveronpartialfailure": true,
"filesystemslist": [],
"volumes": [],
"qosgrouplist": []
}]
}}"""
# A fake add QOS group response of cloudbyte's elasticenter
FAKE_ADD_QOS_GROUP_RESPONSE = """{ "addqosgroupresponse" : {
"qosgroup" : {
"id": "d73662ac-6db8-3b2c-981a-012af4e2f7bd",
"name": "QoS_DS1acc1openstacktsm",
"tsmid": "8146146e-f67b-3942-8074-3074599207a4",
"controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86",
"poolid": "73b567c0-e57d-37b5-b765-9d70725f59af",
"parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9",
"tsmName": "openstacktsm",
"offlinenodes": "",
"sitename": "site1",
"clustername": "HA1",
"controllerName": "node1",
"clusterstatus": "Online",
"currentThroughput": 0,
"currentIOPS": 0,
"currentLatency": 0,
"currentThrottle": 0,
"iopsvalue": "(0/100)",
"throughputvalue": "(0/400)",
"iops": "100",
"iopscontrol": "true",
"throughput": "400",
"tpcontrol": "true",
"blocksize": "4k",
"latency": "15",
"graceallowed": false,
"type": "1",
"revisionnumber": 0,
"managedstate": "Available",
"configurationstate": "init",
"standardproviops": 0,
"operatingblocksize": 0,
"operatingcachehit": 0,
"operatingiops": 0,
"standardoperatingiops": 0
}
}}"""
# A fake create volume response of cloudbyte's elasticenter
FAKE_CREATE_VOLUME_RESPONSE = """{ "createvolumeresponse" : {
"jobid": "f94e2257-9515-4a44-add0-4b16cb1bcf67"
}}"""
# A fake query async job response of cloudbyte's elasticenter
FAKE_QUERY_ASYNC_JOB_RESULT_RESPONSE = """{ "queryasyncjobresultresponse" : {
"accountid": "e8aca633-7bce-4ab7-915a-6d8847248467",
"userid": "a83d1030-1b85-40f7-9479-f40e4dbdd5d5",
"cmd": "com.cloudbyte.api.commands.CreateVolumeCmd",
"msg": "5",
"jobstatus": 1,
"jobprocstatus": 0,
"jobresultcode": 0,
"jobresulttype": "object",
"jobresult": {
"storage": {
"id": "92cfd601-bc1f-3fa7-8322-c492099f3326",
"name": "DS1",
"simpleid": 20,
"compression": "off",
"sync": "always",
"noofcopies": 1,
"recordsize": "4k",
"deduplication": "off",
"quota": "10G",
"path": "devpool1/acc1openstacktsm/DS1",
"tsmid": "8146146e-f67b-3942-8074-3074599207a4",
"poolid": "73b567c0-e57d-37b5-b765-9d70725f59af",
"mountpoint": "acc1DS1",
"currentUsedSpace": 0,
"currentAvailableSpace": 0,
"currentTotalSpace": 0,
"currentThroughput": 0,
"currentIOPS": 0,
"currentLatency": 0,
"currentThrottle": 0,
"tsmName": "openstacktsm",
"hapoolname": "devpool1",
"revisionnumber": 0,
"blocklength": "512B",
"nfsenabled": false,
"cifsenabled": false,
"iscsienabled": true,
"fcenabled": false
}
},
"created": "2014-06-16 15:49:49",
"jobid": "f94e2257-9515-4a44-add0-4b16cb1bcf67"
}}"""
# A fake list filesystem response of cloudbyte's elasticenter
FAKE_LIST_FILE_SYSTEM_RESPONSE = """{ "listFilesystemResponse" : {
"count":1 ,
"filesystem" : [{
"id": "c93df32e-3a99-3491-8e10-cf318a7f9b7f",
"name": "c93df32e3a9934918e10cf318a7f9b7f",
"simpleid": 34,
"type": "filesystem",
"revisionnumber": 1,
"path": "/cvsacc1DS1",
"clusterid": "8b404f12-7975-4e4e-8549-7abeba397fc9",
"clusterstatus": "Online",
"Tsmid": "955eaf34-4221-3a77-82d0-99113b126fa8",
"tsmType": "1",
"accountid": "d13a4e9e-0c05-4d2d-8a5e-5efd3ef058e0",
"poolid": "adcbef8f-2193-3f2c-9bb1-fcaf977ae0fc",
"controllerid": "8c2f7084-99c0-36e6-9cb7-205e3ba4c813",
"groupid": "663923c9-084b-3778-b13d-72f23d046b8d",
"parentid": "08de7c14-62af-3992-8407-28f5f053e59b",
"compression": "off",
"sync": "always",
"noofcopies": 1,
"recordsize": "4k",
"deduplication": "off",
"quota": "1T",
"unicode": "off",
"casesensitivity": "sensitive",
"readonly": false,
"nfsenabled": true,
"cifsenabled": false,
"iscsienabled": false,
"fcenabled": false,
"currentUsedSpace": 19968,
"currentAvailableSpace": 1028608,
"currentTotalSpace": 1048576,
"currentThroughput": 0,
"currentIOPS": 0,
"currentLatency": 0,
"currentThrottle": 0,
"numericquota": 1048576.0,
"status": "Online",
"managedstate": "Available",
"configurationstate": "sync",
"tsmName": "cvstsm1",
"ipaddress": "172.16.50.35",
"sitename": "BLR",
"clustername": "HAGrp1",
"controllerName": "Controller",
"hapoolname": "pool",
"hapoolgrace": true,
"tsmgrace": true,
"tsmcontrolgrace": "false",
"accountname": "CustomerA",
"groupname": "QoS_DS1cvsacc1cvstsm1",
"iops": "500",
"blocksize": "4",
"throughput": "2000",
"latency": "15",
"graceallowed": false,
"offlinenodes": "",
"tpcontrol": "true",
"iopscontrol": "true",
"tsmAvailIops": "8",
"tsmAvailTput": "32",
"iqnname": "",
"mountpoint": "cvsacc1DS1",
"pooltakeover": "noTakeOver",
"volumeaccessible": "true",
"localschedulecount": 0
}]
}}"""
# A fake list storage snapshot response of cloudbyte's elasticenter
FAKE_LIST_STORAGE_SNAPSHOTS_RESPONSE = """{ "listDatasetSnapshotsResponse" : {
"count":1 ,
"snapshot" : [{
"name": "snap_c60890b1f23646f29e6d51e6e592cee6",
"path": "DS1@snap_c60890b1f23646f29e6d51e6e592cee6",
"availMem": "-",
"usedMem": "0",
"refer": "26K",
"mountpoint": "-",
"timestamp": "Mon Jun 16 2014 14:41",
"clones": 0,
"pooltakeover": "noTakeOver",
"managedstate": "Available"
}]
}}"""
# A fake delete storage snapshot response of cloudbyte's elasticenter
FAKE_DELETE_STORAGE_SNAPSHOT_RESPONSE = """{ "deleteSnapshotResponse" : {
"DeleteSnapshot" : {
"status": "success"
}
}}"""
# A fake update volume iscsi service response of cloudbyte's elasticenter
FAKE_UPDATE_VOLUME_ISCSI_SERVICE_RESPONSE = (
"""{ "updatingvolumeiscsidetails" : {
"viscsioptions" : {
"id": "0426c04a-8fac-30e8-a8ad-ddab2f08013a",
"volume_id": "12371e7c-392b-34b9-ac43-073b3c85f1d1",
"ag_id": "4459248d-e9f1-3d2a-b7e8-b5d9ce587fc1",
"ig_id": "527bd65b-ebec-39ce-a5e9-9dd1106cc0fc",
"iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1",
"authmethod": "None",
"status": true,
"usn": "12371e7c392b34b9ac43073b3c85f1d1",
"initialdigest": "Auto",
"queuedepth": "32",
"inqproduct": 0,
"inqrevision": 0,
"blocklength": "512B"
}}
}""")
# A fake list iscsi initiator response of cloudbyte's elasticenter
FAKE_LIST_ISCSI_INITIATOR_RESPONSE = """{ "listInitiatorsResponse" : {
"count":2 ,
"initiator" : [{
"id": "527bd65b-ebec-39ce-a5e9-9dd1106cc0fc",
"accountid": "86c5251a-9044-4690-b924-0d97627aeb8c",
"name": "ALL",
"netmask": "ALL",
"initiatorgroup": "ALL"
},{
"id": "203e0235-1d5a-3130-9204-98e3f642a564",
"accountid": "86c5251a-9044-4690-b924-0d97627aeb8c",
"name": "None",
"netmask": "None",
"initiatorgroup": "None"
}]
}}"""
# A fake delete file system response of cloudbyte's elasticenter
FAKE_DELETE_FILE_SYSTEM_RESPONSE = """{ "deleteResponse" : {
"response" : [{
"code": "0",
"description": "success"
}]
}}"""
# A fake create storage snapshot response of cloudbyte's elasticenter
FAKE_CREATE_STORAGE_SNAPSHOT_RESPONSE = (
"""{ "createStorageSnapshotResponse" : {
"StorageSnapshot" : {
"id": "21d7a92a-f15e-3f5b-b981-cb30697b8028",
"name": "snap_c60890b1f23646f29e6d51e6e592cee6",
"usn": "21d7a92af15e3f5bb981cb30697b8028",
"lunusn": "12371e7c392b34b9ac43073b3c85f1d1",
"lunid": "12371e7c-392b-34b9-ac43-073b3c85f1d1",
"scsiEnabled": false
}}
}""")
# A fake list volume iscsi service response of cloudbyte's elasticenter
FAKE_LIST_VOLUME_ISCSI_SERVICE_RESPONSE = (
"""{ "listVolumeiSCSIServiceResponse" : {
"count":1 ,
"iSCSIService" : [{
"id": "67ddcbf4-6887-3ced-8695-7b9cdffce885",
"volume_id": "c93df32e-3a99-3491-8e10-cf318a7f9b7f",
"ag_id": "4459248d-e9f1-3d2a-b7e8-b5d9ce587fc1",
"ig_id": "203e0235-1d5a-3130-9204-98e3f642a564",
"iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1",
"authmethod": "None",
"status": true,
"usn": "92cfd601bc1f3fa78322c492099f3326",
"initialdigest": "Auto",
"queuedepth": "32",
"inqproduct": 0,
"inqrevision": 0,
"blocklength": "512B"
}]
}}""")
# A fake clone dataset snapshot response of cloudbyte's elasticenter
FAKE_CLONE_DATASET_SNAPSHOT_RESPONSE = """{ "cloneDatasetSnapshot" : {
"filesystem" : {
"id": "dcd46a57-e3f4-3fc1-8dd8-2e658d9ebb11",
"name": "DS1Snap1clone1",
"simpleid": 21,
"type": "volume",
"revisionnumber": 1,
"path": "iqn.2014-06.acc1.openstacktsm:acc1DS1Snap1clone1",
"clusterid": "0ff44329-9a69-4611-bac2-6eaf1b08bb18",
"clusterstatus": "Online",
"Tsmid": "8146146e-f67b-3942-8074-3074599207a4",
"tsmType": "1",
"accountid": "86c5251a-9044-4690-b924-0d97627aeb8c",
"poolid": "73b567c0-e57d-37b5-b765-9d70725f59af",
"controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86",
"groupid": "d73662ac-6db8-3b2c-981a-012af4e2f7bd",
"parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9",
"compression": "off",
"sync": "always",
"noofcopies": 1,
"recordsize": "4k",
"deduplication": "off",
"quota": "10G",
"unicode": "off",
"casesensitivity": "sensitive",
"readonly": false,
"nfsenabled": false,
"cifsenabled": false,
"iscsienabled": true,
"fcenabled": false,
"currentUsedSpace": 0,
"currentAvailableSpace": 10240,
"currentTotalSpace": 10240,
"currentThroughput": 0,
"currentIOPS": 0,
"currentLatency": 0,
"currentThrottle": 0,
"numericquota": 10240.0,
"status": "Online",
"managedstate": "Available",
"configurationstate": "sync",
"tsmName": "openstacktsm",
"ipaddress": "20.10.22.56",
"sitename": "site1",
"clustername": "HA1",
"controllerName": "node1",
"hapoolname": "devpool1",
"hapoolgrace": true,
"tsmgrace": true,
"tsmcontrolgrace": "false",
"accountname": "acc1",
"groupname": "QoS_DS1acc1openstacktsm",
"iops": "100",
"blocksize": "4k",
"throughput": "400",
"latency": "15",
"graceallowed": false,
"offlinenodes": "",
"tpcontrol": "true",
"iopscontrol": "true",
"tsmAvailIops": "700",
"tsmAvailTput": "2800",
"iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1Snap1clone1",
"mountpoint": "acc1DS1Snap1clone1",
"blocklength": "512B",
"volumeaccessible": "true",
"localschedulecount": 0
}
}}"""
# A fake update filesystem response of cloudbyte's elasticenter
FAKE_UPDATE_FILE_SYSTEM_RESPONSE = """{ "updatefilesystemresponse" : {
"count":1 ,
"filesystem" : [{
"id": "92cfd601-bc1f-3fa7-8322-c492099f3326",
"name": "DS1",
"simpleid": 20,
"type": "volume",
"revisionnumber": 1,
"path": "iqn.2014-06.acc1.openstacktsm:acc1DS1",
"clusterid": "0ff44329-9a69-4611-bac2-6eaf1b08bb18",
"clusterstatus": "Online",
"Tsmid": "8146146e-f67b-3942-8074-3074599207a4",
"tsmType": "1",
"accountid": "86c5251a-9044-4690-b924-0d97627aeb8c",
"poolid": "73b567c0-e57d-37b5-b765-9d70725f59af",
"controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86",
"groupid": "d73662ac-6db8-3b2c-981a-012af4e2f7bd",
"parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9",
"compression": "off",
"sync": "always",
"noofcopies": 1,
"recordsize": "4k",
"deduplication": "off",
"quota": "12G",
"unicode": "off",
"casesensitivity": "sensitive",
"readonly": false,
"nfsenabled": false,
"cifsenabled": false,
"iscsienabled": true,
"fcenabled": false,
"currentUsedSpace": 0,
"currentAvailableSpace": 10240,
"currentTotalSpace": 10240,
"currentThroughput": 0,
"currentIOPS": 0,
"currentLatency": 0,
"currentThrottle": 0,
"numericquota": 12288.0,
"status": "Online",
"managedstate": "Available",
"configurationstate": "sync",
"tsmName": "openstacktsm",
"ipaddress": "20.10.22.56",
"sitename": "site1",
"clustername": "HA1",
"controllerName": "node1",
"hapoolname": "devpool1",
"hapoolgrace": true,
"tsmgrace": true,
"tsmcontrolgrace": "false",
"accountname": "acc1",
"groupname": "QoS_DS1acc1openstacktsm",
"iops": "100",
"blocksize": "4k",
"throughput": "400",
"latency": "15",
"graceallowed": false,
"offlinenodes": "",
"tpcontrol": "true",
"iopscontrol": "true",
"tsmAvailIops": "700",
"tsmAvailTput": "2800",
"iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1",
"mountpoint": "acc1DS1",
"blocklength": "512B",
"volumeaccessible": "true",
"localschedulecount": 0
}]
}}"""
# A fake update QOS group response of cloudbyte's elasticenter
FAKE_UPDATE_QOS_GROUP_RESPONSE = """{ "updateqosresponse" : {
"count":1 ,
"qosgroup" : [{
"id": "d73662ac-6db8-3b2c-981a-012af4e2f7bd",
"name": "QoS_DS1acc1openstacktsm",
"tsmid": "8146146e-f67b-3942-8074-3074599207a4",
"controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86",
"poolid": "73b567c0-e57d-37b5-b765-9d70725f59af",
"parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9",
"tsmName": "openstacktsm",
"offlinenodes": "",
"sitename": "site1",
"clustername": "HA1",
"controllerName": "node1",
"clusterstatus": "Online",
"currentThroughput": 0,
"currentIOPS": 0,
"currentLatency": 0,
"currentThrottle": 0,
"iopsvalue": "(0/101)",
"throughputvalue": "(0/404)",
"iops": "101",
"iopscontrol": "true",
"throughput": "404",
"tpcontrol": "true",
"blocksize": "4k",
"latency": "15",
"graceallowed": true,
"type": "1",
"revisionnumber": 2,
"managedstate": "Available",
"configurationstate": "sync",
"status": "Online",
"standardproviops": 0,
"operatingblocksize": 0,
"operatingcachehit": 0,
"operatingiops": 0,
"standardoperatingiops": 0
}]
}}"""
# This dict maps each elasticenter http command to the parsed (dict)
# form of its fake json response.
# NOTE: a redundant second assignment of 'listStorageSnapshots' (with the
# identical value) was removed; each command now appears exactly once.
MAP_COMMAND_TO_FAKE_RESPONSE = {
    'deleteFileSystem': json.loads(FAKE_DELETE_FILE_SYSTEM_RESPONSE),
    'listFileSystem': json.loads(FAKE_LIST_FILE_SYSTEM_RESPONSE),
    'deleteSnapshot': json.loads(FAKE_DELETE_STORAGE_SNAPSHOT_RESPONSE),
    'listStorageSnapshots': json.loads(FAKE_LIST_STORAGE_SNAPSHOTS_RESPONSE),
    'updateVolumeiSCSIService': json.loads(
        FAKE_UPDATE_VOLUME_ISCSI_SERVICE_RESPONSE),
    'createStorageSnapshot': json.loads(
        FAKE_CREATE_STORAGE_SNAPSHOT_RESPONSE),
    'listAccount': json.loads(FAKE_LIST_ACCOUNT_RESPONSE),
    'listTsm': json.loads(FAKE_LIST_TSM_RESPONSE),
    'addQosGroup': json.loads(FAKE_ADD_QOS_GROUP_RESPONSE),
    'queryAsyncJobResult': json.loads(FAKE_QUERY_ASYNC_JOB_RESULT_RESPONSE),
    'createVolume': json.loads(FAKE_CREATE_VOLUME_RESPONSE),
    'listVolumeiSCSIService': json.loads(
        FAKE_LIST_VOLUME_ISCSI_SERVICE_RESPONSE),
    'listiSCSIInitiator': json.loads(FAKE_LIST_ISCSI_INITIATOR_RESPONSE),
    'cloneDatasetSnapshot': json.loads(FAKE_CLONE_DATASET_SNAPSHOT_RESPONSE),
    'updateFileSystem': json.loads(FAKE_UPDATE_FILE_SYSTEM_RESPONSE),
    'updateQosGroup': json.loads(FAKE_UPDATE_QOS_GROUP_RESPONSE),
}

# This dict maps each elasticenter http command to its raw fake json
# (string) response.
MAP_COMMAND_TO_FAKE_JSON_RESPONSE = {
    'listTsm': FAKE_LIST_TSM_RESPONSE,
}
class CloudByteISCSIDriverTestCase(testtools.TestCase):
def setUp(self):
super(CloudByteISCSIDriverTestCase, self).setUp()
self._configure_driver()
def _configure_driver(self):
configuration = conf.Configuration(None, None)
# initialize the elasticenter iscsi driver
self.driver = cloudbyte.CloudByteISCSIDriver(
configuration=configuration)
# override some parts of driver configuration
self.driver.configuration.cb_tsm_name = 'openstack'
self.driver.configuration.cb_account_name = 'CustomerA'
def _side_effect_api_req(self, cmd, params, version='1.0'):
"""This is a side effect function.
The return value is determined based on cmd argument.
The signature matches exactly with the method it tries
to mock.
"""
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_create_vol(self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'createVolume':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_query_asyncjob(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'queryAsyncJobResult':
return {'queryasyncjobresultresponse': {'jobstatus': 0}}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_tsm(self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listTsm':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _none_response_to_list_tsm(self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listTsm':
return {"listTsmResponse": {}}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_filesystem(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listFileSystem':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_vol_iscsi_service(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listVolumeiSCSIService':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_iscsi_initiator(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listiSCSIInitiator':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_create_vol_from_snap(self, cloned_volume, snapshot):
"""This is a side effect function."""
return {}
def _side_effect_create_snapshot(self, snapshot):
"""This is a side effect function."""
model_update = {}
model_update['provider_id'] = "devpool1/acc1openstacktsm/DS1@DS1Snap1"
return model_update
def _side_effect_get_connection(self, host, url):
"""This is a side effect function."""
return_obj = {}
return_obj['http_status'] = 200
# mock the response data
return_obj['data'] = MAP_COMMAND_TO_FAKE_RESPONSE['listTsm']
return_obj['error'] = None
return return_obj
def _side_effect_get_err_connection(self, host, url):
"""This is a side effect function."""
return_obj = {}
return_obj['http_status'] = 500
# mock the response data
return_obj['data'] = None
return_obj['error'] = "Http status: 500, Error: Elasticenter "
"is not available."
return return_obj
def _side_effect_get_err_connection2(self, host, url):
"""This is a side effect function."""
msg = ("Error executing CloudByte API %(cmd)s , Error: %(err)s" %
{'cmd': 'MockTest', 'err': 'Error'})
raise exception.VolumeBackendAPIException(msg)
def _get_fake_volume_id(self):
# Get the filesystems
fs_list = MAP_COMMAND_TO_FAKE_RESPONSE['listFileSystem']
filesystems = fs_list['listFilesystemResponse']['filesystem']
# Get the volume id from the first filesystem
volume_id = filesystems[0]['id']
return volume_id
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_execute_and_get_response_details')
def test_api_request_for_cloudbyte(self, mock_conn):
# Test - I
# configure the mocks with respective side-effects
mock_conn.side_effect = self._side_effect_get_connection
# run the test
data = self.driver._api_request_for_cloudbyte('listTsm', {})
# assert the data attributes
self.assertEqual(1, data['listTsmResponse']['count'])
# Test - II
# configure the mocks with side-effects
mock_conn.reset_mock()
mock_conn.side_effect = self._side_effect_get_err_connection
# run the test
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to execute CloudByte API'):
self.driver._api_request_for_cloudbyte('listTsm', {})
# Test - III
# configure the mocks with side-effects
mock_conn.reset_mock()
mock_conn.side_effect = self._side_effect_get_err_connection2
# run the test
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Error executing CloudByte API'):
self.driver._api_request_for_cloudbyte('listTsm', {})
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_volume(self, mock_api_req):
# prepare the dependencies
fake_volume_id = self._get_fake_volume_id()
volume = {'id': fake_volume_id, 'provider_id': fake_volume_id}
# Test-I
mock_api_req.side_effect = self._side_effect_api_req
# run the test
self.driver.delete_volume(volume)
# assert that 2 api calls were invoked
self.assertEqual(2, mock_api_req.call_count)
# Test-II
# reset & re-configure mock
volume['provider_id'] = None
mock_api_req.reset_mock()
mock_api_req.side_effect = self._side_effect_api_req
# run the test
self.driver.delete_volume(volume)
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_delete_snapshot(self, mock_api_req):
    """delete_snapshot issues one API call, or none without provider_id."""
    snapshot = {
        'id': 'SomeID',
        'provider_id': 'devpool1/acc1openstacktsm/DS1@DS1Snap1',
        'display_name': 'DS1Snap1',
        'volume_id': 'SomeVol',
        'volume': {
            'display_name': 'DS1'
        }
    }
    # Test - I
    # NOTE(review): unlike the sibling tests, no side_effect is configured
    # here, so the mocked API returns a plain Mock -- confirm intentional
    # now run the test
    self.driver.delete_snapshot(snapshot)
    # assert that 1 api call was invoked
    self.assertEqual(1, mock_api_req.call_count)
    # Test - II
    # reconfigure the dependencies: no provider_id means nothing to delete
    snapshot['provider_id'] = None
    # reset & reconfigure the mock
    mock_api_req.reset_mock()
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    self.driver.delete_snapshot(snapshot)
    # assert that no api calls were invoked
    self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_create_snapshot(self, mock_api_req):
    """create_snapshot succeeds for a known volume and fails without one."""
    # prepare the dependencies
    fake_volume_id = self._get_fake_volume_id()
    snapshot = {
        'id': 'c60890b1-f236-46f2-9e6d-51e6e592cee6',
        'display_name': 'DS1Snap1',
        'volume_id': 'SomeVol',
        'volume': {
            'display_name': 'DS1',
            'provider_id': fake_volume_id
        }
    }
    # Test - I
    # configure the mocks with respective side-effects
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    model_update = self.driver.create_snapshot(snapshot)
    # assert that 2 api calls were invoked
    self.assertEqual(2, mock_api_req.call_count)
    # the snapshot UUID is embedded (dashes stripped) in the provider_id
    self.assertEqual('DS1@snap_c60890b1f23646f29e6d51e6e592cee6',
                     model_update['provider_id'])
    # Test - II
    # reconfigure the dependencies: missing source-volume provider_id
    snapshot['volume']['provider_id'] = None
    # reset & reconfigure the mock
    mock_api_req.reset_mock()
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test & assert the exception
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            'Bad or unexpected response from the storage volume '
            'backend API: Failed to create snapshot'):
        self.driver.create_snapshot(snapshot)
    # assert that no api calls were invoked (failure happens before any)
    self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_create_volume(self, mock_api_req):
    """Drive create_volume through the happy path and its failure modes.

    Each failure test swaps in a side-effect helper that nulls out one
    of the backend responses the driver depends on.
    NOTE(review): the test numbering jumps from IV to VI -- a "Test - V"
    appears to have been removed; confirm nothing is missing.
    """
    # prepare the dependencies
    fake_volume_id = self._get_fake_volume_id()
    volume = {
        'id': fake_volume_id,
        'size': 22
    }
    # Test - I
    # configure the mocks with respective side-effects
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    provider_details = self.driver.create_volume(volume)
    # assert equality checks for certain configuration attributes
    self.assertEqual(
        'openstack', self.driver.configuration.cb_tsm_name)
    self.assertEqual(
        'CustomerA', self.driver.configuration.cb_account_name)
    # the iSCSI portal must appear in the provider location
    self.assertThat(
        provider_details['provider_location'],
        matchers.Contains('172.16.50.35:3260'))
    # assert that 9 api calls were invoked
    self.assertEqual(9, mock_api_req.call_count)
    # Test - II
    # reconfigure the dependencies: an id unknown to the backend
    volume['id'] = 'NotExists'
    del volume['size']
    # reset & reconfigure the mock
    mock_api_req.reset_mock()
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test & assert the exception
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            "Bad or unexpected response from the storage volume "
            "backend API: Volume \[NotExists\] not found in "
            "CloudByte storage."):
        self.driver.create_volume(volume)
    # Test - III
    # reconfigure the dependencies
    volume['id'] = 'abc'
    # reset the mocks
    mock_api_req.reset_mock()
    # configure or re-configure the mocks: null create-volume response
    mock_api_req.side_effect = self._side_effect_api_req_to_create_vol
    # now run the test & assert the exception
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            'Bad or unexpected response from the storage volume '
            'backend API: Null response received while '
            'creating volume'):
        self.driver.create_volume(volume)
    # Test - IV
    # reconfigure the dependencies
    # reset the mocks
    mock_api_req.reset_mock()
    # configure or re-configure the mocks: null list-filesystem response
    mock_api_req.side_effect = self._side_effect_api_req_to_list_filesystem
    # now run the test
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            "Bad or unexpected response from the storage volume "
            "backend API: Null response received from CloudByte's "
            "list filesystem."):
        self.driver.create_volume(volume)
    # Test - VI
    volume['id'] = fake_volume_id
    # reconfigure the dependencies
    # reset the mocks
    mock_api_req.reset_mock()
    # configure or re-configure the mocks: null iscsi-service response
    mock_api_req.side_effect = (
        self._side_effect_api_req_to_list_vol_iscsi_service)
    # now run the test
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            "Bad or unexpected response from the storage volume "
            "backend API: Null response received from CloudByte's "
            "list volume iscsi service."):
        self.driver.create_volume(volume)
    # Test - VII
    # reconfigure the dependencies
    # reset the mocks
    mock_api_req.reset_mock()
    # configure or re-configure the mocks: null iscsi-initiators response
    mock_api_req.side_effect = (
        self._side_effect_api_req_to_list_iscsi_initiator)
    # now run the test
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            "Bad or unexpected response from the storage volume "
            "backend API: Null response received from CloudByte's "
            "list iscsi initiators."):
        self.driver.create_volume(volume)
    # Test - VIII
    volume['id'] = fake_volume_id
    volume['size'] = 22
    # reconfigure the dependencies
    # reset the mocks
    mock_api_req.reset_mock()
    # configure or re-configure the mocks: TSM lookup returns nothing
    mock_api_req.side_effect = (
        self._none_response_to_list_tsm)
    # now run the test
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            "Bad or unexpected response from the storage volume "
            "backend API: TSM \[openstack\] was not found in CloudByte "
            "storage for account \[CustomerA\]."):
        self.driver.create_volume(volume)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   'create_volume_from_snapshot')
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   'create_snapshot')
def test_create_cloned_volume(self, mock_create_snapshot,
                              mock_create_vol_from_snap, mock_api_req):
    """Cloning delegates to create_snapshot + create_volume_from_snapshot.

    Both collaborators are mocked, so no direct backend API call is
    expected from create_cloned_volume itself.
    """
    # prepare the input test data
    fake_volume_id = self._get_fake_volume_id()
    src_volume = {'display_name': 'DS1Snap1'}
    cloned_volume = {
        'source_volid': fake_volume_id,
        'id': 'SomeNewID',
        'display_name': 'CloneOfDS1Snap1'
    }
    # Test - I
    # configure the mocks with respective sideeffects
    mock_api_req.side_effect = self._side_effect_api_req
    mock_create_vol_from_snap.side_effect = (
        self._side_effect_create_vol_from_snap)
    mock_create_snapshot.side_effect = (
        self._side_effect_create_snapshot)
    # now run the test
    self.driver.create_cloned_volume(cloned_volume, src_volume)
    # assert that no direct api calls were invoked
    self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_create_volume_from_snapshot(self, mock_api_req):
    """Creating a volume from a snapshot issues a single backend call."""
    # prepare the input test data
    fake_volume_id = self._get_fake_volume_id()
    snapshot = {
        'volume_id': fake_volume_id,
        'provider_id': 'devpool1/acc1openstacktsm/DS1@DS1Snap1',
        'id': 'SomeSnapID',
        'volume': {
            'provider_id': fake_volume_id
        }
    }
    cloned_volume = {
        'display_name': 'CloneOfDS1Snap1',
        'id': 'ClonedVolID'
    }
    # Test - I
    # configure the mocks with respective side-effects
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    self.driver.create_volume_from_snapshot(cloned_volume, snapshot)
    # assert exactly one api call was invoked
    self.assertEqual(1, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_extend_volume(self, mock_api_req):
    """Extending a volume issues a single backend call."""
    # prepare the input test data
    fake_volume_id = self._get_fake_volume_id()
    volume = {
        'id': 'SomeID',
        'provider_id': fake_volume_id
    }
    new_size = '2'
    # Test - I
    # configure the mock with respective side-effects
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    self.driver.extend_volume(volume, new_size)
    # assert exactly one api call was invoked
    self.assertEqual(1, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_create_export(self, mock_api_req):
    """create_export must return a model update without provider_auth."""
    # configure the mock with the canned API responses
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    model_update = self.driver.create_export({}, {})
    # assertIsNone is the idiomatic None check and gives a clearer
    # failure message than assertEqual(None, ...)
    self.assertIsNone(model_update['provider_auth'])
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_ensure_export(self, mock_api_req):
    """ensure_export must return a model update without provider_auth."""
    # configure the mock with the canned API responses
    mock_api_req.side_effect = self._side_effect_api_req
    # now run the test
    model_update = self.driver.ensure_export({}, {})
    # assertIsNone is the idiomatic None check and gives a clearer
    # failure message than assertEqual(None, ...)
    self.assertIsNone(model_update['provider_auth'])
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
                   '_api_request_for_cloudbyte')
def test_get_volume_stats(self, mock_api_req):
    """get_volume_stats hits the backend only on refresh=True."""
    # configure the mock with a side-effect
    mock_api_req.side_effect = self._side_effect_api_req
    # Test - I
    # run the test without refresh: cached stats, no backend traffic
    vol_stats = self.driver.get_volume_stats()
    # assert 0 api calls were invoked
    self.assertEqual(0, mock_api_req.call_count)
    # Test - II
    # run the test with refresh as True
    vol_stats = self.driver.get_volume_stats(refresh=True)
    # assert exactly one api call was invoked
    self.assertEqual(1, mock_api_req.call_count)
    # assert the result attributes with respective values
    self.assertEqual(1024.0, vol_stats['total_capacity_gb'])
    self.assertEqual(824.0, vol_stats['free_capacity_gb'])
    self.assertEqual(0, vol_stats['reserved_percentage'])
    self.assertEqual('CloudByte', vol_stats['vendor_name'])
    self.assertEqual('iSCSI', vol_stats['storage_protocol'])
    # Test - III
    # configure the mocks with side-effect: list tsm returns nothing
    mock_api_req.reset_mock()
    mock_api_req.side_effect = self._side_effect_api_req_to_list_tsm
    # run the test with refresh as True
    with testtools.ExpectedException(
            exception.VolumeBackendAPIException,
            "Bad or unexpected response from the storage volume "
            "backend API: No response was received from CloudByte "
            "storage list tsm API call."):
        self.driver.get_volume_stats(refresh=True)
| |
# -*- coding: utf-8 -*-
'''
Manage client ssh components
.. note::
This module requires the use of MD5 hashing. Certain security audits may
not permit the use of MD5. For those cases, this module should be disabled
or removed.
'''
from __future__ import absolute_import

# Import python libs
import base64
import binascii
import hashlib
import logging
import os
import re
import subprocess

# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.decorators as decorators
from salt.exceptions import (
    SaltInvocationError,
    CommandExecutionError,
)
from salt.ext.six.moves import range
# Module-level logger, named after this module per the logging convention
log = logging.getLogger(__name__)

# Standard TCP port for SSH; presumably used by the known-hosts helpers
# that take an optional ``port`` argument (usage not visible in this
# chunk) -- TODO confirm
DEFAULT_SSH_PORT = 22
def __virtual__():
    '''
    Load everywhere except Windows: this module shells out to the
    OpenSSH client tools, which are not generally available there.
    Returns True to load, or a (False, reason) tuple to refuse.
    '''
    # TODO: This could work on windows with some love
    if salt.utils.is_windows():
        return (False, 'The module cannot be loaded on windows.')
    return True
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string
if using higher enc. If the type is not found, raise CommandExecutionError.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
ecdsa = ['e', 'ecdsa', 'ecdsa-sha2-nistp521', 'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp256']
ed25519 = ['ed25519', 'ssh-ed25519']
if enc in rsa:
return 'ssh-rsa'
elif enc in dss:
return 'ssh-dss'
elif enc in ecdsa:
# ecdsa defaults to ecdsa-sha2-nistp256
# otherwise enc string is actual encoding string
if enc in ['e', 'ecdsa']:
return 'ecdsa-sha2-nistp256'
return enc
elif enc in ed25519:
return 'ssh-ed25519'
else:
raise CommandExecutionError(
'Incorrect encryption key type \'{0}\'.'.format(enc)
)
def _format_auth_line(key, enc, comment, options):
'''
Properly format user input.
'''
line = ''
if options:
line += '{0} '.format(','.join(options))
line += '{0} {1} {2}\n'.format(enc, key, comment)
return line
def _expand_authorized_keys_path(path, user, home):
'''
Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5)
'''
converted_path = ''
had_escape = False
for char in path:
if had_escape:
had_escape = False
if char == '%':
converted_path += '%'
elif char == 'u':
converted_path += user
elif char == 'h':
converted_path += home
else:
error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char)
raise CommandExecutionError(error)
continue
elif char == '%':
had_escape = True
else:
converted_path += char
if had_escape:
error = "AuthorizedKeysFile path: Last character can't be escape character"
raise CommandExecutionError(error)
return converted_path
def _get_config_file(user, config):
    '''
    Get absolute path to a user's ssh_config.

    user
        Owner of the file; the user's home anchors a relative *config*

    config
        Path to the keys file; %u/%h/%% tokens are expanded first

    Raises CommandExecutionError when the user does not exist.
    '''
    uinfo = __salt__['user.info'](user)
    if not uinfo:
        raise CommandExecutionError('User \'{0}\' does not exist'.format(user))
    home = uinfo['home']
    # Expand sshd_config-style tokens before testing for absoluteness
    config = _expand_authorized_keys_path(config, user, home)
    if not os.path.isabs(config):
        config = os.path.join(home, config)
    return config
def _replace_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Replace an existing key in the user's authorized_keys file.

    The file is read fully, the line whose key field equals *key* is
    swapped for a freshly formatted line, and the file is rewritten.

    Raises CommandExecutionError when the file cannot be read or written.
    '''
    auth_line = _format_auth_line(key, enc, comment, options or [])
    lines = []
    full = _get_config_file(user, config)
    try:
        # Read pass: collect every line, substituting the matching key
        with salt.utils.fopen(full, 'r') as _fh:
            for line in _fh:
                if line.startswith('#'):
                    # Commented Line
                    lines.append(line)
                    continue
                comps = line.split()
                if len(comps) < 2:
                    # Not a valid line
                    lines.append(line)
                    continue
                # If the first field is not a key type ('ssh-*'/'ecds...'),
                # the line starts with an options field and the key is one
                # field further right
                key_ind = 1
                if comps[0][:4:] not in ['ssh-', 'ecds']:
                    key_ind = 2
                if comps[key_ind] == key:
                    lines.append(auth_line)
                else:
                    lines.append(line)
        # The 'with' block has already closed the file (the previous
        # explicit _fh.close() inside it was redundant); re-open it
        # writable and write out the changes.
        with salt.utils.fopen(full, 'w') as _fh:
            _fh.writelines(lines)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(
            'Problem reading or writing to key file: {0}'.format(exc)
        )
def _validate_keys(key_file):
    '''
    Return a dict containing validated keys in the passed file.

    Maps each base64 key blob to a dict with its ``enc``, ``comment``,
    ``options`` and ``fingerprint``. Lines that are comments, blank,
    malformed, or whose key fails base64 validation are skipped.

    Raises CommandExecutionError if the file cannot be read.
    '''
    ret = {}
    # group(1): optional options field; group(2): "<enc> <key> [comment]"
    linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
    try:
        with salt.utils.fopen(key_file, 'r') as _fh:
            for line in _fh:
                if line.startswith('#'):
                    # Commented Line
                    continue
                # get "{options} key"
                search = re.search(linere, line)
                if not search:
                    # not an auth ssh key, perhaps a blank line
                    continue
                opts = search.group(1)
                comps = search.group(2).split()
                if len(comps) < 2:
                    # Not a valid line
                    continue
                if opts:
                    # It has options, grab them
                    options = opts.split(',')
                else:
                    options = []
                enc = comps[0]
                key = comps[1]
                comment = ' '.join(comps[2:])
                # An invalid (non-base64) key yields no fingerprint: skip it
                fingerprint = _fingerprint(key)
                if fingerprint is None:
                    continue
                ret[key] = {'enc': enc,
                            'comment': comment,
                            'options': options,
                            'fingerprint': fingerprint}
    except (IOError, OSError):
        raise CommandExecutionError(
            'Problem reading ssh key file {0}'.format(key_file)
        )
    return ret
def _fingerprint(public_key):
'''
Return a public key fingerprint based on its base64-encoded representation
The fingerprint string is formatted according to RFC 4716 (ch.4), that is,
in the form "xx:xx:...:xx"
If the key is invalid (incorrect base64 string), return None
'''
try:
raw_key = public_key.decode('base64')
except binascii.Error:
return None
ret = hashlib.md5(raw_key).hexdigest()
chunks = [ret[i:i + 2] for i in range(0, len(ret), 2)]
return ':'.join(chunks)
def _get_known_hosts_file(config=None, user=None):
if user:
config = config or '.ssh/known_hosts'
else:
config = config or '/etc/ssh/ssh_known_hosts'
if os.path.isabs(config):
full = config
else:
if user:
uinfo = __salt__['user.info'](user)
if not uinfo:
return {'status': 'error',
'error': 'User {0} does not exist'.format(user)}
full = os.path.join(uinfo['home'], config)
else:
return {
'status': 'error',
'error': 'Cannot determine absolute path to file.'
}
return full
def host_keys(keydir=None, private=True):
    '''
    Return the minion's host keys

    keydir
        Directory holding the ssh_host_* files; defaults to /etc/ssh on
        Linux, required elsewhere

    private : True
        When False, skip the private key files (those without a .pub
        extension)

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.host_keys
        salt '*' ssh.host_keys keydir=/etc/ssh
        salt '*' ssh.host_keys keydir=/etc/ssh private=False
    '''
    # TODO: support parsing sshd_config for the key directory
    if not keydir:
        if __grains__['kernel'] == 'Linux':
            keydir = '/etc/ssh'
        else:
            # If keydir is None, os.listdir() will blow up
            raise SaltInvocationError('ssh.host_keys: Please specify a keydir')
    keys = {}
    for fn_ in os.listdir(keydir):
        if fn_.startswith('ssh_host_'):
            if fn_.endswith('.pub') is False and private is False:
                log.info(
                    'Skipping private key file {0} as private is set to False'
                    .format(fn_)
                )
                continue
            # key name is the algorithm part of ssh_host_<alg>_key[.pub]
            top = fn_.split('.')
            comps = top[0].split('_')
            kname = comps[2]
            if len(top) > 1:
                kname += '.{0}'.format(top[1])
            try:
                with salt.utils.fopen(os.path.join(keydir, fn_), 'r') as _fh:
                    # As of RFC 4716 "a key file is a text file, containing a
                    # sequence of lines", although some SSH implementations
                    # (e.g. OpenSSH) manage their own format(s).  Please see
                    # #20708 for a discussion about how to handle SSH key files
                    # in the future
                    keys[kname] = _fh.readline()
                    # only read the whole file if it is not in the legacy 1.1
                    # binary format
                    if keys[kname] != "SSH PRIVATE KEY FILE FORMAT 1.1\n":
                        keys[kname] += _fh.read()
                    keys[kname] = keys[kname].strip()
            except (IOError, OSError):
                # unreadable key files are reported as empty strings
                keys[kname] = ''
    return keys
def auth_keys(user=None, config='.ssh/authorized_keys'):
    '''
    Return the authorized keys for users

    user
        A username, a list of usernames, or None for all local users

    config
        Keys file path relative to each user's home (or absolute)

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.auth_keys
        salt '*' ssh.auth_keys root
        salt '*' ssh.auth_keys user=root
        salt '*' ssh.auth_keys user="[user1, user2]"
    '''
    if not user:
        user = __salt__['user.list_users']()

    # Historically a single username returned a flat dict of keys rather
    # than {user: keys}; preserve that output shape
    old_output_when_one_user = False
    if not isinstance(user, list):
        user = [user]
        old_output_when_one_user = True

    keys = {}
    for u in user:
        full = None
        try:
            full = _get_config_file(u, config)
        except CommandExecutionError:
            # unknown users are silently skipped
            pass

        if full and os.path.isfile(full):
            keys[u] = _validate_keys(full)

    if old_output_when_one_user:
        if user[0] in keys:
            return keys[user[0]]
        else:
            return {}

    return keys
def check_key_file(user,
                   source,
                   config='.ssh/authorized_keys',
                   saltenv='base',
                   env=None):
    '''
    Check a keyfile from a source destination against the local keys and
    return the keys to change

    Returns a dict mapping each key in *source* to its status
    ("update", "add" or "exists"); empty dict when the source cannot be
    cached or contains no valid keys.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_key_file root salt://ssh/keyfile
    '''
    if env is not None:
        salt.utils.warn_until(
            'Carbon',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Carbon.'
        )
        # Backwards compatibility
        saltenv = env

    # Pull the key file down from the fileserver before parsing it
    keyfile = __salt__['cp.cache_file'](source, saltenv)
    if not keyfile:
        return {}

    s_keys = _validate_keys(keyfile)
    if not s_keys:
        err = 'No keys detected in {0}. Is file properly ' \
              'formatted?'.format(source)
        log.error(err)
        __context__['ssh_auth.error'] = err
        return {}
    else:
        ret = {}
        for key in s_keys:
            ret[key] = check_key(
                user,
                key,
                s_keys[key]['enc'],
                s_keys[key]['comment'],
                s_keys[key]['options'],
                config)
        return ret
def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys',
              cache_keys=None):
    '''
    Check to see if a key needs updating, returns "update", "add" or "exists"

    "update" means the key is present but its formatted line (enc,
    comment or options) differs; "add" means it is absent entirely.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_key <user> <key> <enc> <comment> <options>
    '''
    if cache_keys is None:
        cache_keys = []
    enc = _refine_enc(enc)
    current = auth_keys(user, config)
    nline = _format_auth_line(key, enc, comment, options)

    # Removing existing keys from the auth_keys isn't really a good idea
    # in fact
    #
    # as:
    #   - We can have non-salt managed keys in that file
    #   - We can have multiple states defining keys for an user
    #     and with such code only one state will win
    #     the remove all-other-keys war
    #
    # if cache_keys:
    #     for pub_key in set(current).difference(set(cache_keys)):
    #         rm_auth_key(user, pub_key)

    if key in current:
        # compare the freshly formatted line against one rebuilt from the
        # stored attributes to detect enc/comment/options drift
        cline = _format_auth_line(key,
                                  current[key]['enc'],
                                  current[key]['comment'],
                                  current[key]['options'])
        if cline != nline:
            return 'update'
    else:
        return 'add'
    return 'exists'
def rm_auth_key_from_file(user,
                          source,
                          config='.ssh/authorized_keys',
                          saltenv='base',
                          env=None):
    '''
    Remove an authorized key from the specified user's authorized key file,
    using a file as source

    Returns 'Key removed', 'Key not removed', 'Key not present' or 'fail'.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
    '''
    if env is not None:
        salt.utils.warn_until(
            'Carbon',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Carbon.'
        )
        # Backwards compatibility
        saltenv = env

    lfile = __salt__['cp.cache_file'](source, saltenv)
    if not os.path.isfile(lfile):
        raise CommandExecutionError(
            'Failed to pull key file from salt file server'
        )

    s_keys = _validate_keys(lfile)
    if not s_keys:
        err = (
            'No keys detected in {0}. Is file properly formatted?'.format(
                source
            )
        )
        log.error(err)
        __context__['ssh_auth.error'] = err
        return 'fail'
    else:
        rval = ''
        # remove every key found in the source file, accumulating results
        for key in s_keys:
            rval += rm_auth_key(
                user,
                key,
                config
            )
        # Due to the ability for a single file to have multiple keys, it's
        # possible for a single call to this function to have both "replace"
        # and "new" as possible valid returns. I ordered the following as I
        # thought best.
        if 'Key not removed' in rval:
            return 'Key not removed'
        elif 'Key removed' in rval:
            return 'Key removed'
        else:
            return 'Key not present'
def rm_auth_key(user, key, config='.ssh/authorized_keys'):
    '''
    Remove an authorized key from the specified user's authorized key file

    Returns 'Key removed', 'Key not removed' (I/O failure) or
    'Key not present'.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_auth_key <user> <key>
    '''
    current = auth_keys(user, config)
    # group(2) is "<enc> <key> [comment]"; same pattern as _validate_keys
    linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
    if key in current:
        # Remove the key
        full = _get_config_file(user, config)

        # Return something sensible if the file doesn't exist
        if not os.path.isfile(full):
            return 'Authorized keys file {0} not present'.format(full)

        lines = []
        try:
            # Read every line in the file to find the right ssh key
            # and then write out the correct one. Open the file once
            with salt.utils.fopen(full, 'r') as _fh:
                for line in _fh:
                    if line.startswith('#'):
                        # Commented Line
                        lines.append(line)
                        continue

                    # get "{options} key"
                    search = re.search(linere, line)
                    if not search:
                        # not an auth ssh key, perhaps a blank line
                        continue

                    comps = search.group(2).split()

                    if len(comps) < 2:
                        # Not a valid line
                        lines.append(line)
                        continue

                    pkey = comps[1]

                    # This is the key we are "deleting", so don't put
                    # it in the list of keys to be re-added back
                    if pkey == key:
                        continue

                    lines.append(line)

            # Let the context manager do the right thing here and then
            # re-open the file in write mode to save the changes out.
            with salt.utils.fopen(full, 'w') as _fh:
                _fh.writelines(lines)
        except (IOError, OSError) as exc:
            # log.warn is a deprecated alias of log.warning
            log.warning('Could not read/write key file: {0}'.format(str(exc)))
            return 'Key not removed'
        return 'Key removed'
    # TODO: Should this function return a simple boolean?
    return 'Key not present'
def set_auth_key_from_file(user,
                           source,
                           config='.ssh/authorized_keys',
                           saltenv='base',
                           env=None):
    '''
    Add a key to the authorized_keys file, using a file as the source.

    Returns 'fail', 'replace', 'new' or 'no change', aggregated over all
    keys found in the source file.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
    '''
    if env is not None:
        salt.utils.warn_until(
            'Carbon',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Carbon.'
        )
        # Backwards compatibility
        saltenv = env

    # TODO: add support for pulling keys from other file sources as well
    lfile = __salt__['cp.cache_file'](source, saltenv)
    if not os.path.isfile(lfile):
        raise CommandExecutionError(
            'Failed to pull key file from salt file server'
        )

    s_keys = _validate_keys(lfile)
    if not s_keys:
        err = (
            'No keys detected in {0}. Is file properly formatted?'.format(
                source
            )
        )
        log.error(err)
        __context__['ssh_auth.error'] = err
        return 'fail'
    else:
        rval = ''
        # add each key, passing the full key set as cache_keys
        for key in s_keys:
            rval += set_auth_key(
                user,
                key,
                s_keys[key]['enc'],
                s_keys[key]['comment'],
                s_keys[key]['options'],
                config,
                list(s_keys.keys())
            )
        # Due to the ability for a single file to have multiple keys, it's
        # possible for a single call to this function to have both "replace"
        # and "new" as possible valid returns. I ordered the following as I
        # thought best.
        if 'fail' in rval:
            return 'fail'
        elif 'replace' in rval:
            return 'replace'
        elif 'new' in rval:
            return 'new'
        else:
            return 'no change'
def set_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys',
        cache_keys=None):
    '''
    Add a key to the authorized_keys file. The "key" parameter must only be the
    string of text that is the encoded key. If the key begins with "ssh-rsa"
    or ends with user@host, remove those from the key before passing it to this
    function.

    Returns 'invalid', 'fail', 'replace', 'no change' or 'new'.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_auth_key <user> '<key>' enc='dsa'
    '''
    if cache_keys is None:
        cache_keys = []
    # A bare key is a single base64 token; embedded whitespace means the
    # caller passed a whole authorized_keys line instead
    if len(key.split()) > 1:
        return 'invalid'

    enc = _refine_enc(enc)
    uinfo = __salt__['user.info'](user)
    if not uinfo:
        return 'fail'
    status = check_key(user, key, enc, comment, options, config, cache_keys)
    if status == 'update':
        _replace_auth_key(user, key, enc, comment, options or [], config)
        return 'replace'
    elif status == 'exists':
        return 'no change'
    else:
        auth_line = _format_auth_line(key, enc, comment, options)
        fconfig = _get_config_file(user, config)
        # Fail if the key lives under the user's homedir, and the homedir
        # doesn't exist
        udir = uinfo.get('home', '')
        if fconfig.startswith(udir) and not os.path.isdir(udir):
            return 'fail'
        if not os.path.isdir(os.path.dirname(fconfig)):
            dpath = os.path.dirname(fconfig)
            os.makedirs(dpath)
            if os.geteuid() == 0:
                os.chown(dpath, uinfo['uid'], uinfo['gid'])
            # 448 == 0o700: only the owner may use the ~/.ssh directory
            os.chmod(dpath, 448)
            # If SELINUX is available run a restorecon on the file
            rcon = salt.utils.which('restorecon')
            if rcon:
                cmd = [rcon, dpath]
                subprocess.call(cmd)

        new_file = not os.path.isfile(fconfig)

        try:
            with salt.utils.fopen(fconfig, 'a+') as _fh:
                if new_file is False:
                    # Make sure the existing file ends with a newline before
                    # appending.  The previous code did _fh.seek(1024, 2),
                    # which positions *past* EOF (and is rejected outright by
                    # Python 3 text streams), so the check always read '' and
                    # a spurious blank line was appended on every call.
                    _fh.seek(0)
                    contents = _fh.read()
                    if contents and not contents.rstrip(' ').endswith('\n'):
                        # 'a+' mode appends regardless of the read position
                        _fh.write('\n')
                _fh.write('{0}'.format(auth_line))
        except (IOError, OSError) as exc:
            msg = 'Could not write to key file: {0}'
            raise CommandExecutionError(msg.format(str(exc)))

        if new_file:
            if os.geteuid() == 0:
                os.chown(fconfig, uinfo['uid'], uinfo['gid'])
            # 384 == 0o600: authorized_keys readable/writable by owner only
            os.chmod(fconfig, 384)
            # If SELINUX is available run a restorecon on the file
            rcon = salt.utils.which('restorecon')
            if rcon:
                cmd = [rcon, fconfig]
                subprocess.call(cmd)
        return 'new'
def _parse_openssh_output(lines):
    '''
    Helper function which parses ssh-keygen -F and ssh-keyscan function output
    and yield dict with keys information, one by one.

    Comment lines, malformed lines and keys that fail base64 validation
    are silently skipped.
    '''
    for raw in lines:
        if raw.startswith('#'):
            continue
        fields = raw.split()
        # a valid entry is exactly "hostname enc key"
        if len(fields) != 3:
            continue
        hostname, enc, key = fields
        fingerprint = _fingerprint(key)
        if not fingerprint:
            continue
        yield {'hostname': hostname, 'key': key, 'enc': enc,
               'fingerprint': fingerprint}
@decorators.which('ssh-keygen')
def get_known_host(user, hostname, config=None, port=None):
    '''
    Return information about known host from the configfile, if any.
    If there is no such key, return None.

    Returns a dict with 'hostname', 'key', 'enc' and 'fingerprint', an
    error dict if the known_hosts file cannot be resolved, or None.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.get_known_host <user> <hostname>
    '''
    full = _get_known_hosts_file(config=config, user=user)

    # _get_known_hosts_file signals failure with an error dict
    if isinstance(full, dict):
        return full

    ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
    # ssh-keygen -F prints the matching known_hosts entries, if any
    cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full]
    lines = __salt__['cmd.run'](cmd,
                                ignore_retcode=True,
                                python_shell=False).splitlines()
    known_hosts = list(_parse_openssh_output(lines))
    return known_hosts[0] if known_hosts else None
@decorators.which('ssh-keyscan')
def recv_known_host(hostname,
                    enc=None,
                    port=None,
                    hash_hostname=True,
                    hash_known_hosts=True,
                    timeout=5):
    '''
    Retrieve information about host public key from remote server

    hostname
        The name of the remote host (e.g. "github.com")

    enc
        Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
        or ssh-dss

    port
        optional parameter, denoting the port of the remote host, which will be
        used in case, if the public key will be requested from it. By default
        the port 22 is used.

    hash_hostname : True
        Hash all hostnames and addresses in the known hosts file.

        .. deprecated:: Carbon

            Please use hash_known_hosts instead.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts.  If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable.  Default is 5 seconds.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.recv_known_host <hostname> enc=<enc> port=<port>
    '''
    if not hash_hostname:
        salt.utils.warn_until(
            'Carbon',
            'The hash_hostname parameter is misleading as ssh-keygen can only '
            'hash the whole known hosts file, not entries for individual '
            'hosts. Please use hash_known_hosts=False instead.')
        hash_known_hosts = hash_hostname

    # The following list of OSes have an old version of openssh-clients
    # and thus require the '-t' option for ssh-keyscan
    need_dash_t = ('CentOS-5',)

    cmd = ['ssh-keyscan']
    if port:
        # NOTE(review): port may be an int here; presumably cmd.run
        # stringifies argv elements -- TODO confirm
        cmd.extend(['-p', port])
    if enc:
        cmd.extend(['-t', enc])
    if not enc and __grains__.get('osfinger') in need_dash_t:
        cmd.extend(['-t', 'rsa'])
    if hash_known_hosts:
        cmd.append('-H')
    cmd.extend(['-T', str(timeout)])
    cmd.append(hostname)
    lines = None
    attempts = 5
    # ssh-keyscan can transiently return nothing; retry a few times
    while not lines and attempts > 0:
        attempts = attempts - 1
        lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    known_hosts = list(_parse_openssh_output(lines))
    return known_hosts[0] if known_hosts else None
def check_known_host(user=None, hostname=None, key=None, fingerprint=None,
                     config=None, port=None):
    '''
    Check the record in known_hosts file, either by its value or by fingerprint
    (it's enough to set up either key or fingerprint, you don't need to set up
    both).

    If provided key or fingerprint doesn't match with stored value, return
    "update", if no value is found for a given host, return "add", otherwise
    return "exists".

    If neither key, nor fingerprint is defined, then additional validation is
    not performed.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_known_host <user> <hostname> key='AAAA...FAaQ=='
    '''
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}

    # Fall back to the per-user or system-wide default known_hosts path
    if user:
        config = config or '.ssh/known_hosts'
    else:
        config = config or '/etc/ssh/ssh_known_hosts'

    known_host = get_known_host(user, hostname, config=config, port=port)
    if not known_host or 'fingerprint' not in known_host:
        return 'add'
    if key:
        return 'exists' if key == known_host['key'] else 'update'
    if fingerprint:
        return ('exists' if fingerprint == known_host['fingerprint']
                else 'update')
    return 'exists'
def rm_known_host(user=None, hostname=None, config=None, port=None):
    '''
    Remove all keys belonging to hostname from a known_hosts file.

    Returns a status dict: {'status': 'removed', 'comment': ...} on
    success, or {'status': 'error', 'error': ...} on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_known_host <user> <hostname>
    '''
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}

    full = _get_known_hosts_file(config=config, user=user)

    # _get_known_hosts_file signals failure with an error dict
    if isinstance(full, dict):
        return full

    if not os.path.isfile(full):
        return {'status': 'error',
                'error': 'Known hosts file {0} does not exist'.format(full)}

    ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
    # ssh-keygen -R removes every entry that matches the hostname
    cmd = ['ssh-keygen', '-R', ssh_hostname, '-f', full]
    cmd_result = __salt__['cmd.run'](cmd, python_shell=False)
    # ssh-keygen creates a new file, thus a chown is required.
    if os.geteuid() == 0 and user:
        uinfo = __salt__['user.info'](user)
        os.chown(full, uinfo['uid'], uinfo['gid'])
    return {'status': 'removed', 'comment': cmd_result}
def set_known_host(user=None,
                   hostname=None,
                   fingerprint=None,
                   key=None,
                   port=None,
                   enc=None,
                   hash_hostname=True,
                   config=None,
                   hash_known_hosts=True,
                   timeout=5):
    '''
    Download SSH public key from remote host "hostname", optionally validate
    its fingerprint against "fingerprint" variable and save the record in the
    known_hosts file.

    If such a record does already exists in there, do nothing.

    user
        The user who owns the ssh authorized keys file to modify

    hostname
        The name of the remote host (e.g. "github.com")

    fingerprint
        The fingerprint of the key which must be presented in the known_hosts
        file (optional if key specified)

    key
        The public key which must be presented in the known_hosts file
        (optional if fingerprint specified)

    port
        optional parameter, denoting the port of the remote host, which will be
        used in case, if the public key will be requested from it. By default
        the port 22 is used.

    enc
        Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
        or ssh-dss

    hash_hostname : True
        Hash all hostnames and addresses in the known hosts file.

        .. deprecated:: Carbon

            Please use hash_known_hosts instead.

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts.  If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable.  Default is 5 seconds.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_known_host <user> fingerprint='xx:xx:..:xx' enc='ssh-rsa' config='.ssh/known_hosts'
    '''
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}

    if not hash_hostname:
        # Legacy parameter: forward its value into hash_known_hosts and warn.
        salt.utils.warn_until(
            'Carbon',
            'The hash_hostname parameter is misleading as ssh-keygen can only '
            'hash the whole known hosts file, not entries for individual '
            'hosts. Please use hash_known_hosts=False instead.')
        hash_known_hosts = hash_hostname

    # A hashed entry cannot encode a non-standard port, so reject the combo.
    if port is not None and port != DEFAULT_SSH_PORT and hash_known_hosts:
        return {'status': 'error',
                'error': 'argument port can not be used in '
                'conjunction with argument hash_known_hosts'}

    update_required = False
    check_required = False
    stored_host = get_known_host(user, hostname, config, port)

    # Decide whether the stored entry must be replaced (update_required) or
    # merely re-verified against the remote key (check_required).  The final
    # elif fires when no key was supplied but an entry is stored.
    if not stored_host:
        update_required = True
    elif fingerprint and fingerprint != stored_host['fingerprint']:
        update_required = True
    elif key and key != stored_host['key']:
        update_required = True
    elif key != stored_host['key']:
        check_required = True

    if not update_required and not check_required:
        return {'status': 'exists', 'key': stored_host['key']}

    if not key:
        # No key given explicitly: fetch it from the remote host.
        remote_host = recv_known_host(hostname,
                                      enc=enc,
                                      port=port,
                                      hash_known_hosts=hash_known_hosts,
                                      timeout=timeout)
        if not remote_host:
            return {'status': 'error',
                    'error': 'Unable to receive remote host key'}

        if fingerprint and fingerprint != remote_host['fingerprint']:
            return {'status': 'error',
                    'error': ('Remote host public key found but its fingerprint '
                              'does not match one you have provided')}

        if check_required:
            if remote_host['key'] == stored_host['key']:
                return {'status': 'exists', 'key': stored_host['key']}

    # remove everything we had in the config so far
    rm_known_host(user, hostname, config=config)

    # set up new value
    full = _get_known_hosts_file(config=config, user=user)

    if isinstance(full, dict):
        # Helper returned error information instead of a path.
        return full

    if key:
        remote_host = {'hostname': hostname, 'enc': enc, 'key': key}

    # Plain "host enc key" line unless a non-default port must be recorded
    # (hashed entries and IPv6-style hostnames never get the [host]:port form).
    if hash_known_hosts or port in [DEFAULT_SSH_PORT, None] or ':' in remote_host['hostname']:
        line = '{hostname} {enc} {key}\n'.format(**remote_host)
    else:
        remote_host['port'] = port
        line = '[{hostname}]:{port} {enc} {key}\n'.format(**remote_host)

    # ensure ~/.ssh exists
    ssh_dir = os.path.dirname(full)
    if user:
        uinfo = __salt__['user.info'](user)

    try:
        log.debug('Ensuring ssh config dir "{0}" exists'.format(ssh_dir))
        os.makedirs(ssh_dir)
    except OSError as exc:
        # NOTE(review): matches on the strerror text, which is locale- and
        # platform-dependent; errno comparison would be more robust.
        if exc.args[1] == 'Permission denied':
            log.error('Unable to create directory {0}: '
                      '{1}'.format(ssh_dir, exc.args[1]))
        elif exc.args[1] == 'File exists':
            log.debug('{0} already exists, no need to create '
                      'it'.format(ssh_dir))
    else:
        # set proper ownership/permissions (only when we just created the dir)
        if user:
            os.chown(ssh_dir, uinfo['uid'], uinfo['gid'])
        os.chmod(ssh_dir, 0o700)

    # write line to known_hosts file
    try:
        with salt.utils.fopen(full, 'a') as ofile:
            ofile.write(line)
    except (IOError, OSError) as exception:
        raise CommandExecutionError(
            "Couldn't append to known hosts file: '{0}'".format(exception)
        )

    # Restore ownership/permissions when running as root on behalf of a user.
    if os.geteuid() == 0 and user:
        os.chown(full, uinfo['uid'], uinfo['gid'])
    os.chmod(full, 0o644)

    if key and hash_known_hosts:
        # Explicit keys bypass recv_known_host's hashing, so hash the file now.
        cmd_result = __salt__['ssh.hash_known_hosts'](user=user, config=full)

    return {'status': 'updated', 'old': stored_host, 'new': remote_host}
def user_keys(user=None, pubfile=None, prvfile=None):
    '''
    Return the user's ssh keys on the minion

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.user_keys
        salt '*' ssh.user_keys user=user1
        salt '*' ssh.user_keys user=user1 pubfile=/home/user1/.ssh/id_rsa.pub prvfile=/home/user1/.ssh/id_rsa
        salt '*' ssh.user_keys user=user1 prvfile=False
        salt '*' ssh.user_keys user="['user1','user2'] pubfile=id_rsa.pub prvfile=id_rsa

    As you can see you can tell Salt not to read from the user's private (or
    public) key file by setting the file path to ``False``. This can be useful
    to prevent Salt from publishing private data via Salt Mine or others.
    '''
    if not user:
        user = __salt__['user.list_users']()

    if not isinstance(user, list):
        # only one so convert to list
        user = [user]

    keys = {}
    for username in user:
        keys[username] = {}
        info = __salt__['user.info'](username)
        if 'home' not in info:
            # no home directory, skip
            continue

        # Candidate key files: explicit paths win, ``False`` disables a
        # category entirely, otherwise fall back to the conventional names.
        candidates = []
        if pubfile:
            candidates.append(pubfile)
        elif pubfile is not False:
            candidates.extend(['id_rsa.pub', 'id_dsa.pub',
                               'id_ecdsa.pub', 'id_ed25519.pub'])
        if prvfile:
            candidates.append(prvfile)
        elif prvfile is not False:
            candidates.extend(['id_rsa', 'id_dsa', 'id_ecdsa', 'id_ed25519'])

        for candidate in candidates:
            if candidate.startswith('/'):
                keyname = os.path.basename(candidate)
                path = candidate
            else:
                # if not full path, assume key is in .ssh
                # in user's home directory
                keyname = candidate
                path = '{0}/.ssh/{1}'.format(info['home'], candidate)

            if not os.path.exists(path):
                continue
            try:
                with salt.utils.fopen(path, 'r') as fh_:
                    keys[username][keyname] = ''.join(fh_.readlines())
            except (IOError, OSError):
                # Unreadable file: silently skip, same as the absent case.
                pass

    # Drop users for which no key file could be read.
    return dict((name, found) for name, found in keys.items() if found)
@decorators.which('ssh-keygen')
def hash_known_hosts(user=None, config=None):
    '''
    Hash all the hostnames in the known hosts file.

    .. versionadded:: 2014.7.0

    user
        hash known hosts of this user

    config
        path to known hosts file: can be absolute or relative to user's home
        directory

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.hash_known_hosts
    '''
    known_hosts_path = _get_known_hosts_file(config=config, user=user)
    if isinstance(known_hosts_path, dict):
        # Helper returned error information instead of a path.
        return known_hosts_path

    if not os.path.isfile(known_hosts_path):
        return {'status': 'error',
                'error': 'Known hosts file {0} does not exist'.format(known_hosts_path)}

    output = __salt__['cmd.run'](
        ['ssh-keygen', '-H', '-f', known_hosts_path],
        python_shell=False)

    # ssh-keygen creates a new file, thus a chown is required.
    if os.geteuid() == 0 and user:
        owner = __salt__['user.info'](user)
        os.chown(known_hosts_path, owner['uid'], owner['gid'])

    return {'status': 'updated', 'comment': output}
def _hostname_and_port_to_ssh_hostname(hostname, port=DEFAULT_SSH_PORT):
    # Non-default ports use OpenSSH's bracketed "[host]:port" form;
    # the default (or an unset) port is just the bare hostname.
    if port and port != DEFAULT_SSH_PORT:
        return '[{0}]:{1}'.format(hostname, port)
    return hostname
def key_is_encrypted(key):
    '''
    .. versionadded:: 2015.8.7

    Function to determine whether or not a private key is encrypted with a
    passphrase.

    Checks key for a ``Proc-Type`` header with ``ENCRYPTED`` in the value. If
    found, returns ``True``, otherwise returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.key_is_encrypted /root/id_rsa
    '''
    try:
        with salt.utils.fopen(key, 'r') as fp_:
            contents = fp_.read()
    except (IOError, OSError) as exc:
        # Raise a CommandExecutionError
        salt.utils.files.process_read_exception(exc, key)

    if re.search(r'BEGIN (?:\w+\s)*PRIVATE KEY', contents) is None:
        raise CommandExecutionError('{0} is not a private key'.format(key))

    return 'ENCRYPTED' in contents
| |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from event_log import EventLog
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
class Command(object):
    """Base class for any command line action in repo.

    Subclasses supply NAME/helpUsage attributes and override Execute();
    the repo launcher injects ``manifest`` before dispatching.
    """

    # True when the command appears in the abbreviated help listing.
    common = False
    # Shared event log used to record command telemetry.
    event_log = EventLog()
    # Injected by the repo launcher before Execute() is called.
    manifest = None
    # Lazily-constructed option parser; see the OptionParser property.
    _optparse = None

    def WantPager(self, _opt):
        # Subclasses override to request pager-wrapped output.
        return False

    def ReadEnvironmentOptions(self, opts):
        """ Set options from environment variables. """

        env_options = self._RegisteredEnvironmentOptions()

        for env_key, opt_key in env_options.items():
            # Get the user-set option value if any
            opt_value = getattr(opts, opt_key)

            # If the value is set, it means the user has passed it as a command
            # line option, and we should use that. Otherwise we can try to set it
            # with the value from the corresponding environment variable.
            if opt_value is not None:
                continue

            env_value = os.environ.get(env_key)
            if env_value is not None:
                setattr(opts, opt_key, env_value)

        return opts

    @property
    def OptionParser(self):
        # Build the parser on first access; '%prog' in helpUsage becomes
        # 'repo <NAME>'.  Falls back to a bare usage line when the subclass
        # defines no helpUsage attribute.
        if self._optparse is None:
            try:
                me = 'repo %s' % self.NAME
                usage = self.helpUsage.strip().replace('%prog', me)
            except AttributeError:
                usage = 'repo %s' % self.NAME
            self._optparse = optparse.OptionParser(usage=usage)
            self._Options(self._optparse)
        return self._optparse

    def _Options(self, p):
        """Initialize the option parser.
        """

    def _RegisteredEnvironmentOptions(self):
        """Get options that can be set from environment variables.

        Return a dictionary mapping environment variable name
        to option key name that it can override.

        Example: {'REPO_MY_OPTION': 'my_option'}

        Will allow the option with key value 'my_option' to be set
        from the value in the environment variable named 'REPO_MY_OPTION'.

        Note: This does not work properly for options that are explicitly
        set to None by the user, or options that are defined with a
        default value other than None.
        """
        return {}

    def Usage(self):
        """Display usage and terminate.
        """
        self.OptionParser.print_usage()
        sys.exit(1)

    def Execute(self, opt, args):
        """Perform the action, after option parsing is complete.
        """
        raise NotImplementedError

    def _ResetPathToProjectMap(self, projects):
        # Rebuild the worktree-path -> project cache from scratch.
        self._by_path = dict((p.worktree, p) for p in projects)

    def _UpdatePathToProjectMap(self, project):
        self._by_path[project.worktree] = project

    def _GetProjectByPath(self, manifest, path):
        """Resolve a filesystem path to a cached project, walking upward."""
        project = None
        if os.path.exists(path):
            oldpath = None
            # Walk up the directory tree until a known worktree is found,
            # or the manifest top directory is reached.
            while path and \
                  path != oldpath and \
                  path != manifest.topdir:
                try:
                    project = self._by_path[path]
                    break
                except KeyError:
                    oldpath = path
                    path = os.path.dirname(path)
            if not project and path == manifest.topdir:
                try:
                    project = self._by_path[path]
                except KeyError:
                    pass
        else:
            # Nonexistent path: only an exact cache hit can match.
            try:
                project = self._by_path[path]
            except KeyError:
                pass
        return project

    def GetProjects(self, args, manifest=None, groups='', missing_ok=False,
                    submodules_ok=False):
        """A list of projects that match the arguments.

        With no args, returns every project matching the active groups
        (including derived subprojects when submodules apply); otherwise
        resolves each arg as a project name or a filesystem path.
        Raises NoSuchProjectError / InvalidProjectGroupsError on failure.
        """
        if not manifest:
            manifest = self.manifest
        all_projects_list = manifest.projects
        result = []

        mp = manifest.manifestProject

        # Group selection: explicit argument, then git config, then the
        # platform default.
        if not groups:
            groups = mp.config.GetString('manifest.groups')
        if not groups:
            groups = 'default,platform-' + platform.system().lower()
        groups = [x for x in re.split(r'[,\s]+', groups) if x]

        if not args:
            derived_projects = {}
            for project in all_projects_list:
                if submodules_ok or project.sync_s:
                    derived_projects.update((p.name, p)
                                            for p in project.GetDerivedSubprojects())
            # NOTE(review): this extend mutates manifest.projects in place.
            all_projects_list.extend(derived_projects.values())
            for project in all_projects_list:
                if (missing_ok or project.Exists) and project.MatchesGroups(groups):
                    result.append(project)
        else:
            self._ResetPathToProjectMap(all_projects_list)

            for arg in args:
                projects = manifest.GetProjectsWithName(arg)

                if not projects:
                    # Not a project name; try to interpret it as a path.
                    path = os.path.abspath(arg).replace('\\', '/')
                    project = self._GetProjectByPath(manifest, path)

                    # If it's not a derived project, update path->project mapping and
                    # search again, as arg might actually point to a derived subproject.
                    if (project and not project.Derived and (submodules_ok or
                                                             project.sync_s)):
                        search_again = False
                        for subproject in project.GetDerivedSubprojects():
                            self._UpdatePathToProjectMap(subproject)
                            search_again = True
                        if search_again:
                            project = self._GetProjectByPath(manifest, path) or project

                    if project:
                        projects = [project]

                if not projects:
                    raise NoSuchProjectError(arg)

                for project in projects:
                    if not missing_ok and not project.Exists:
                        raise NoSuchProjectError(arg)
                    if not project.MatchesGroups(groups):
                        raise InvalidProjectGroupsError(arg)

                result.extend(projects)

        def _getpath(x):
            return x.relpath

        result.sort(key=_getpath)
        return result

    def FindProjects(self, args, inverse=False):
        """Return projects whose name or relpath matches (or, with
        inverse=True, matches none of) the regex patterns in args."""
        result = []
        patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
        for project in self.GetProjects(''):
            for pattern in patterns:
                match = pattern.search(project.name) or pattern.search(project.relpath)
                if not inverse and match:
                    result.append(project)
                    break
                if inverse and match:
                    break
            else:
                # No pattern matched (loop completed without break).
                if inverse:
                    result.append(project)
        result.sort(key=lambda project: project.relpath)
        return result
# pylint: disable=W0223
# Pylint warns that the `InteractiveCommand` and `PagedCommand` classes do not
# override method `Execute` which is abstract in `Command`. Since that method
# is always implemented in classes derived from `InteractiveCommand` and
# `PagedCommand`, this warning can be suppressed.
class InteractiveCommand(Command):
    """Command which requires user interaction on the tty and
    must not run within a pager, even if the user asks to.
    """

    def WantPager(self, _opt):
        # A pager would swallow interactive prompts.
        return False
class PagedCommand(Command):
    """Command which defaults to output in a pager, as its
    display tends to be larger than one screen full.
    """

    def WantPager(self, _opt):
        return True
# pylint: enable=W0223
class MirrorSafeCommand(object):
    """Marker mixin: command permits itself to run within a mirror,
    and does not require a working directory.
    """
class GitcAvailableCommand(object):
    """Marker mixin: command requires GITC to be available, but does
    not require the local client to be a GITC client.
    """
class GitcClientCommand(object):
    """Marker mixin: command requires the local client to be a GITC
    client.
    """
| |
#!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
    """Verify that the sprokit process binding module can be imported."""
    try:
        import sprokit.pipeline.process  # noqa: F401
    except ImportError:
        # BUG FIX: the original used a bare ``except:``, which would also
        # swallow unrelated failures (KeyboardInterrupt, SystemExit, errors
        # raised while the module initializes) and misreport them as an
        # import failure.  Only trap actual import errors.
        test_error("Failed to import the process module")
def test_create():
    """Smoke-test default construction of every process-related binding type."""
    from sprokit.pipeline import datum
    from sprokit.pipeline import process

    # Typedef wrappers: default construction must succeed.
    process.ProcessType()
    process.ProcessTypes()
    process.ProcessName()
    process.ProcessNames()
    process.ProcessProperty()
    process.ProcessProperties()
    process.PortDescription()
    process.PortFrequency(1)
    process.PortFrequency(1, 1)
    process.Port()
    process.Ports()
    process.PortType()
    process.PortFlag()
    process.PortFlags()
    process.PortAddr()
    process.PortAddrs()
    process.Connection()
    process.Connections()

    # Aggregate info structures take full constructor arguments.
    process.PortInfo('type', process.PortFlags(), 'desc', process.PortFrequency(1, 1))
    process.ConfInfo('default', 'desc', False)
    process.DataInfo(True, datum.DatumType.invalid)

    # Enum-style constants must be exposed.
    process.DataCheck.none
    process.DataCheck.sync
    process.DataCheck.valid
def test_api_calls():
    """Smoke-test attribute access and assignment on the process bindings."""
    from sprokit.pipeline import datum
    from sprokit.pipeline import process

    # PortAddr fields are readable and writable.
    a = process.PortAddr()
    a.process
    a.port
    a.process = ''
    a.port = ''

    # PortInfo fields are readable.
    f = process.PortFrequency(1, 1)
    a = process.PortInfo('type', process.PortFlags(), 'desc', f)
    a.type
    a.flags
    a.description
    a.frequency

    # ConfInfo fields are readable.
    a = process.ConfInfo('default', 'desc', False)
    a.default
    a.description
    a.tunable

    # DataInfo fields are readable.
    a = process.DataInfo(True, datum.DatumType.invalid)
    a.in_sync
    a.max_status

    # Class-level constants on PythonProcess must all be exposed.
    process.PythonProcess.property_no_threads
    process.PythonProcess.property_no_reentrancy
    process.PythonProcess.property_unsync_input
    process.PythonProcess.property_unsync_output
    process.PythonProcess.port_heartbeat
    process.PythonProcess.config_name
    process.PythonProcess.config_type
    process.PythonProcess.type_any
    process.PythonProcess.type_none
    process.PythonProcess.type_data_dependent
    process.PythonProcess.type_flow_dependent
    process.PythonProcess.flag_output_const
    process.PythonProcess.flag_output_shared
    process.PythonProcess.flag_input_static
    process.PythonProcess.flag_input_mutable
    process.PythonProcess.flag_input_nodep
    process.PythonProcess.flag_required
def test_flags_as_set():
    """Exercise the Python ``set`` emulation of process.PortFlags."""
    from sprokit.pipeline import process

    # TODO: Make tests more rigorous (check more than just len()).

    a = process.PortFlags()

    # adding to the set
    a.add(process.PythonProcess.flag_required)
    a.add(process.PythonProcess.flag_input_mutable)
    a.add(process.PythonProcess.flag_input_nodep)
    a.add(process.PythonProcess.flag_input_static)

    # length
    if not len(a) == 4:
        test_error("len() does not work: expected 4, got %d" % len(a))

    # adding duplicate values
    a.add(process.PythonProcess.flag_required)
    if not len(a) == 4:
        test_error(".add() added a duplicate item: expected 4, got %d" % len(a))

    # adding invalid objects
    # BUG FIX: the original statement ended with a stray trailing comma,
    # wrapping the call's (discarded) result in a one-item tuple.
    expect_exception('adding a value of an invalid type', TypeError,
                     process.PortFlags.add, a, True)

    # indexing failures
    expect_exception('getting an item by index', TypeError,
                     process.PortFlags.__getitem__, a, 0)
    expect_exception('deleting an item by index', TypeError,
                     process.PortFlags.__delitem__, a, 0)
    expect_exception('setting an item by index', TypeError,
                     process.PortFlags.__setitem__, a, 0, process.PythonProcess.flag_input_mutable)

    # 'in' keyword
    if process.PythonProcess.flag_required not in a:
        test_error("a value in the set is 'not in' the set")
    if process.PythonProcess.flag_output_const in a:
        test_error("a value not in the set is 'in' the set")

    # iteration
    for value in a:
        pass

    # boolean casting
    if not a:
        test_error("a non-empty set is False-like")
    b = process.PortFlags()
    if b:
        test_error("an empty set is True-like")

    # removal
    expect_exception('.pop() on an empty set', KeyError,
                     process.PortFlags.pop, b)
    expect_exception('.remove() with an item that does not exist in the set', KeyError,
                     process.PortFlags.remove, a, process.PythonProcess.flag_output_const)
    a.discard(process.PythonProcess.flag_output_const)
    if not len(a) == 4:
        test_error(".discard() removed an item not in the set")
    a.discard(process.PythonProcess.flag_input_static)
    if not len(a) == 3:
        test_error(".discard() did not remove an item from the set")
    a.remove(process.PythonProcess.flag_input_nodep)
    if not len(a) == 2:
        test_error(".remove() did not remove an item from the set")
    a.pop()
    if not len(a) == 1:
        test_error(".pop() did not remove an item from the set")
    a.clear()
    if a:
        test_error(".clear() did not make a False-like set")

    # copy
    b.add(process.PythonProcess.flag_required)
    c = b.copy()
    b.clear()
    if not c:
        test_error(".clear() on a set modified a set created using .copy()")
    c = b.copy()
    b.add(process.PythonProcess.flag_required)
    if c:
        test_error(".add() on a set modified a set created using .copy()")

    # set vs. set queries
    a.add(process.PythonProcess.flag_input_nodep)
    a.add(process.PythonProcess.flag_input_static)
    if not b.isdisjoint(a):
        test_error(".isdisjoint() does not work")
    if b.issubset(a):
        test_error(".issubset() does not work")
    if a.issuperset(b):
        test_error(".issuperset() does not work")
    a.add(process.PythonProcess.flag_required)
    if b.isdisjoint(a):
        test_error(".isdisjoint() does not work")
    if not b.issubset(a):
        test_error(".issubset() does not work")
    if not a.issuperset(b):
        test_error(".issuperset() does not work")
    u = a.union(b)
    if not len(u) == 3:
        test_error(".union() does not work: expected 3, got %d" % len(u))
    d = a.difference(b)
    if not len(d) == 2:
        test_error(".difference() does not work: expected 2, got %d" % len(d))
    i = a.intersection(b)
    if not len(i) == 1:
        test_error(".intersection() does not work: expected 1, got %d" % len(i))
    b.add(process.PythonProcess.flag_output_const)
    s = a.symmetric_difference(b)
    if not len(s) == 3:
        test_error(".symmetric_difference() does not work: expected 3, got %d" % len(s))
    a.update(b)
    if not len(a) == 4:
        test_error(".update() does not work: expected 4, got %d" % len(a))
if __name__ == '__main__':
    import os
    import sys

    # Usage: <script> <testname> <workdir> <sprokit-python-path>
    if not len(sys.argv) == 4:
        # BUG FIX: the original called test_error() here, but test_error is
        # only brought into scope by the star-import below -- so a bad
        # argument count raised NameError instead of reporting the problem.
        sys.stderr.write("Error: Expected three arguments\n")
        sys.exit(1)

    testname = sys.argv[1]

    os.chdir(sys.argv[2])
    sys.path.append(sys.argv[3])

    from sprokit.test.test import *

    run_test(testname, find_tests(locals()))
| |
"""
author Talha Oz
this is the main code implemented for cs780 class project
"""
# -*- coding: utf-8 -*-
import pandas as p
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import re
from sklearn import cross_validation
import preprocessing as pre
from sklearn.naive_bayes import MultinomialNB
from sklearn.decomposition import PCA
from sklearn.multiclass import OneVsRestClassifier
from sklearn import linear_model
import matplotlib.pyplot as plt
from variableNames import *
import scipy.sparse
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2
"""
from sklearn.svm import SVC
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import Perceptron
from sklearn import svm
from sklearn.preprocessing import LabelBinarizer
import matplotlib.pyplot as plt
from scipy import sparse
from sklearn import decomposition
from sklearn.ensemble import ExtraTreesRegressor
import sklearn.decomposition as deco
import argparse
from sklearn.svm import SVC
%autoreload 2
"""
def plotClasses(y):
    """
    Bar-plot the total weight of each label column of y.

    each class is counted by its weight, not # of nonzero occurrences
    (i.e. column sums, not counts of nonzero entries).

    y: 2-D array of label weights; the column count (5, 4 or other)
    selects the sentiment / when / kind label names respectively.
    """
    fig = plt.figure()
    ax = plt.subplot(1,1,1)
    x1 = range(y.shape[1])
    # Column sums = total weight per class.
    y1 = [sum(y[:,a]) for a in range(y.shape[1])]
    width = 0.8
    labels = "s1,s2,s3,s4,s5,w1,w2,w3,w4,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15".split(',')
    # Select the label subset matching the number of columns passed in.
    if y.shape[1] == 5:
        labels = labels[:5]
    elif y.shape[1] == 4:
        labels = labels[5:9]
    else:
        labels = labels[9:]
    plt.xticks(np.arange(y.shape[1])+width/2,labels)
    # 'legend' comes from the star-import of variableNames; presumably a
    # dict mapping label -> description.  NOTE(review): dict iteration order
    # is arbitrary here, so keys may not line up with the x positions --
    # verify against variableNames.
    legendkeys = tuple([k for k,v in legend.items() if k in labels])
    legendvalues= tuple([v for k,v in legend.items() if k in labels])
    [ax.bar(X,Y,width=width,label=k+' '+v) for X,Y,k,v in zip(x1,y1,legendkeys,legendvalues)]
    # Shrink current axis by 20% to leave room for the legend.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    # Put a legend to the right of the current axis
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    #ax.legend(('1','2'),('1a','2a'))#legendkeys,legendvalues
    plt.show()
def vectorize(y,train,t2,model,kbest=0,is_tfidf=True,is_sparse=True,max_features=None,is_cv=False,perlabel=False,min_df=5,is_nominal=False,is_normal=False,is_LSA=False,scale=False):
if is_cv:
X_train, X_test, y_train, y_true = cross_validation.train_test_split(train['tweet'], y, test_size=.20, random_state = 0)
# X_train1, X_test1, y_train, y_true = cross_validation.train_test_split(train['tweet'], y, test_size=.20, random_state = 0)
# X_train2, X_test2, y_train, y_true = cross_validation.train_test_split(train['state'], y, test_size=.20, random_state = 0)
# X_train3, X_test3, y_train, y_true = cross_validation.train_test_split(train['location'], y, test_size=.20, random_state = 0)
# X_train = np.hstack((X_train1,X_train2,X_train3))
# X_test = np.hstack((X_test1,X_test2,X_test3))
else:
X_train = train['tweet']
X_test = t2['tweet']
y_train = y
# if (y_train.shape[1] > 6):
# model = linear_model.Ridge (alpha = 3.0, normalize = False)
# if is_PCA:
# modelPCA = PCA(n_components='mle')
# model.fit(X_train)
if is_tfidf:
#tfidf = TfidfVectorizer(max_features=max_features, strip_accents='unicode', analyzer='word', smooth_idf=True,sublinear_tf=True,max_df=0.5,min_df=min_df,ngram_range=(1,2),use_idf=True)
tfidf = TfidfVectorizer(max_features=max_features,strip_accents='unicode', analyzer='word', smooth_idf=True,sublinear_tf=True,max_df=0.5,min_df=5,ngram_range=(1,2),use_idf=True)
#tfidf.fit(np.hstack((X_train,X_test))) #fitting on the whole data resulted in a worse mse score
tfidf.fit(X_train)
X_train = tfidf.transform(X_train)
X_test = tfidf.transform(X_test)
if is_LSA:
LSA = TruncatedSVD(n_components=10000, algorithm='randomized', n_iter=5, random_state=0, tol=0.0)
X_train = LSA.fit_transform(X_train)
X_test = LSA.transform(X_test)
else:
vectorizer = CountVectorizer( binary = True )
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
if is_nominal:
if (y_train.shape[1] < 16):
y_rest = y_train.copy()
X_train_list = []
y_weight_list = []
y_train_list = []
for i in range(y_rest.shape[1]):
X_train_list.append(X_train) # repeat X to matchup
y_weight_list.append(np.apply_along_axis(lambda a: a.max(), 1, y_rest)) # get the maximum in y_rest
y_train_list.append(np.apply_along_axis(lambda a: a.argmax(), 1, y_rest).astype(int)) # get the position of the maximum in y_rest
y_rest = np.apply_along_axis(lambda a: [0 if i == a.argmax() else x for i,x in enumerate(a)], 1, y_rest) #set maximum to zero
# y_weight = np.concatenate((y_weight, np.apply_along_axis(lambda a: a.max(), 1, y_rest)))
# y_train = np.concatenate((y_train, np.apply_along_axis(lambda a: a.argmax(), 1, y_rest).astype(int)))
# y_train = np.apply_along_axis(lambda a: [np.floor(x) if x != a.max() else 1 for x in a], 1, y_train).astype(bool)
not_kind = True
X_train = scipy.sparse.vstack(X_train_list)
y_train = np.concatenate(y_train_list)
y_weight = np.concatenate(y_weight_list)
else:
not_kind = False
#y_train = np.apply_along_axis(lambda a: [np.floor(x) if i != a.argmax() else 1 for i,x in enumerate(a)], 1, y_train).astype(bool)
#y_train = np.ceil(y_train).astype(bool)
#y_weight = y_train.copy()
if perlabel:
test_prediction=[]
for i in range(y_train.shape[1]):
if is_nominal:
model.fit(X_train,y_train[:,i]) #sample_weight=y_weight[:,i]
pred = model.predict_proba(X_test)
# pred = model.predict_log_proba(X_test) # for log in SGDRegressor
print pred.shape
test_prediction.append(pred)
else:
model.fit(X_train,y_train[:,i])
test_prediction.append(model.predict(X_test))
pred = np.array(test_prediction).T
if kbest:
ch2 = SelectKBest(chi2,kbest,k=1000)
#yb = y_train
yb = np.around(y_train).astype(bool)
X_train = ch2.fit_transform(X_train, yb)
X_test = ch2.transform(X_test)
if not is_sparse:
X_train = X_train.toarray()
X_test = X_test.toarray()
#nmf = decomposition.NMF(n_components=y_train.shape[1]).fit(tfidf)
#cca = CCA(n_components=100)
#X_train = cca.fit_transform(X_train)
#X_test = cca.transform(X_test)
if not perlabel:
if is_nominal and not_kind:
model.fit(X_train, y_train,sample_weight=y_weight)
pred = model.predict_proba(X_test)
#model.fit(X_train.toarray(), y_train.toarray(),sample_weight=y_weight)
#pred = model.predict_proba(X_test.toarray())
# model.fit(scipy.sparse.csr_matrix(X_train), scipy.sparse.csr_matrix(y_train),sample_weight=y_weight) # added tocsr() !!!
# pred = model.predict_proba(scipy.sparse.csr_matrix(X_test))
#model.fit(scipy.sparse.csr_matrix(X_train), y_train,sample_weight=y_weight) #perceptron
#pred = model.predict_proba(scipy.sparse.csr_matrix(X_test))
else:
model.fit(X_train, y_train)
pred = model.predict(X_test)
if scale:
if (y_train.shape[1] < 6):
pred = np.apply_along_axis(lambda a: a/(np.max(a)-np.min(a)),1,pred)
if is_normal and (y_train.shape[1] < 6):
#pred[pred < 0.1] = 0.0
#pred[pred > 0.9] = 1.0
row_sums = pred.sum(axis=1)
pred = pred / row_sums[:, np.newaxis]
pred = np.around(pred,3)
pred = pred.clip(0,1)
if is_cv:
return pred,y_true
else:
return pred
def cv_loop(train, t2, model, is_sparse=True,kbest=0,is_class=False,is_tfidf=True,max_features=20000,perlabel=False,min_df=5,is_nominal=False,is_normal=False,is_LSA=False,scale=False):
    """
    Cross-validate `model` on the training frame and print/return the RMSE.

    Label columns start at train column 4; they are split into the three
    task groups (sentiment, when, kind) and either predicted jointly
    (perlabel=True) or one group at a time.  Returns (pred, ytrue).
    """
    y = np.array(train.ix[:,4:])
    ys = y[:,:5]#4:9 labeles of sentiment
    yw = y[:,5:9]#9:13 labels of when
    yk = y[:,9:]#13: labels of kind
    if is_class:
        # Binarize weights for classifiers that expect hard labels.
        ys,yw,yk = [np.around(y).astype(bool) for y in (ys,yw,yk)]
    if perlabel:
        pred,ytrue = vectorize(y,train,t2,model,is_tfidf = is_tfidf,kbest=kbest,is_sparse=is_sparse,max_features=max_features,is_cv=True,perlabel=perlabel,is_nominal=is_nominal,is_normal=is_normal,min_df=min_df,scale=scale)
    else:
        # One vectorize/fit per label group, then stitch the groups back
        # together in the submission column order.
        (preds,ys_true),(predw,yw_true),(predk,yk_true) = [vectorize(y,train,t2,model,is_tfidf = is_tfidf,kbest=kbest,is_sparse=is_sparse,max_features=max_features,is_cv=True,perlabel=perlabel,min_df=min_df,is_nominal=is_nominal,is_normal=is_normal,is_LSA=is_LSA,scale=scale) for y in (ys,yw,yk)]
        pred = np.hstack((preds,predw,predk))
        ytrue = np.hstack((ys_true,yw_true,yk_true))
    # Root-mean-square error over all cells (the contest metric).
    mse = np.sqrt(np.sum(np.array(pred-ytrue)**2)/(ytrue.shape[0]*float(ytrue.shape[1])))
    print 'Train error: {0}'.format(mse)
    return pred,ytrue
def submission(predictions, filename='prediction.csv'):
    """Write `predictions` (id column + 24 label columns) to a contest CSV.

    The id column is formatted as an integer, every label with two
    decimals.  np.savetxt prefixes the header with '# ', so the first two
    characters must be removed from the output file before submitting!
    """
    row_format = ','.join(['%i'] + ['%.2f'] * 24)
    header = "id,s1,s2,s3,s4,s5,w1,w2,w3,w4,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15"
    np.savetxt(filename, predictions, row_format, header=header, delimiter=',')
def crossvalidate(clf,X,y,cv=3):
scores=[]
for i in range(int(y.shape[1])):
clf.fit(X,y[:,i])
scores.append(cross_val_score(clf, X, y[:,i]))
print scores[-1],
return scores
def predictMe(clf,X,y,test,ids):
    """Fit `clf` once per label column of `y` and predict `test`.

    Returns an array whose first column is `ids` and whose remaining
    columns hold the per-label predictions for `test`.
    """
    per_label = []
    n_labels = int(y.shape[1])
    for col in range(n_labels):
        # refit the same estimator for each target column
        clf.fit(X, y[:, col])
        per_label.append(clf.predict(test))
    stacked = np.array(per_label).T   # rows = test samples, cols = labels
    id_col = np.matrix(ids).T         # leading id column
    return np.array(np.hstack([id_col, stacked]))
def predictThis(clf,train,t2,kbest=0,max_features=20000,is_tfidf=True,is_sparse=True,is_nominal=False,is_LSA=False,min_df=5):
    """Fit on `train` via `vectorize` and build the test prediction matrix.

    Returns an array of [id, 24 scores] rows where the 24 label scores
    are clipped into [0, 1] and rounded to 3 decimals.
    """
    y = np.array(train.ix[:,4:])
    ys = y[:,:5]   # columns 4:9  -- sentiment labels
    yw = y[:,5:9]  # columns 9:13 -- "when" labels
    yk = y[:,9:]   # columns 13:  -- "kind" labels
    if is_tfidf:
        # NOTE(review): this vectorizer is constructed but never used --
        # `vectorize` below builds its own; presumably dead code to remove.
        # (stop_words='english' is omitted since stop words are handled in
        # preprocessing.)
        tfidf = TfidfVectorizer(max_features=max_features, strip_accents='unicode', analyzer='word', smooth_idf=True,sublinear_tf=True,max_df=0.5, min_df=min_df, ngram_range=(1,2))
    # predict each label group with the shared vectorize() pipeline
    sent, when, kind = [vectorize(y,train,t2,clf,is_tfidf = is_tfidf,kbest=kbest,is_sparse=is_sparse,max_features=max_features,is_nominal=is_nominal,is_LSA=is_LSA) for y in (ys,yw,yk)]
    testpred = np.hstack((sent, when, kind))
    # clamp into [0, 1] and round for the submission format
    testpred = np.around(testpred.clip(0,1),3)
    prediction = np.array(np.hstack([np.matrix(t2['id']).T, testpred]))
    return prediction
# to learn about indexing in pandas: http://pandas.pydata.org/pandas-docs/stable/indexing.html#advanced-indexing-with-hierarchical-index
def predictKind(train_file,test_file):
    """Train a ridge model on `train_file` and write predictions for `test_file`.

    Reads both CSVs, folds the state/location metadata into each training
    tweet, runs predictThis()'s pipeline and writes 'prediction.csv'.

    Bug fix: the `train_file`/`test_file` parameters were previously
    overwritten with hard-coded 'train.csv'/'test.csv', so whatever the
    caller (or the command line) passed was silently ignored.

    Returns the prediction matrix (previously None, which no caller used).
    """
    # read files into pandas
    train = p.read_csv(train_file)
    t2 = p.read_csv(test_file)
    # append state and location metadata to the tweet text
    for row in train.index:
        # NOTE(review): chained assignment relies on old-pandas view
        # semantics -- verify on newer pandas versions.
        train['tweet'][row] = ' '.join([train['tweet'][row], train['state'][row], str(train['location'][row])])
    clf = linear_model.Ridge(alpha=3.0, normalize=True)
    prediction = predictThis(clf, train, t2)
    submission(prediction, 'prediction.csv')
    return prediction
if __name__ == "__main__":
    # parse commandline arguments: training and test CSV paths
    parser = argparse.ArgumentParser()
    parser.add_argument("train_file", help="a filename name must be specified")
    parser.add_argument("test_file", help="a filename name must be specified")
    args = parser.parse_args()
    # train and write 'prediction.csv' (see predictKind)
    prediction = predictKind(args.train_file, args.test_file)
    print 'elh'  # Python 2 print statement; marks that the run finished
"""
# a nice document classification example: http://scikit-learn.org/stable/auto_examples/document_classification_20newsgroups.html
# we like ensemble: http://scikit-learn.org/stable/modules/ensemble.html
You use the vocabulary parameter to specify what features should be used. For example, if you want only emoticons to be extracted, you can do the following:
emoticons = {":)":0, ":P":1, ":(":2}
vect = TfidfVectorizer(vocabulary=emoticons)
matrix = vect.fit_transform(traindata)
This will return a <Nx3 sparse matrix of type '<class 'numpy.int64'>' with M stored elements in Compressed Sparse Row format>]. Notice there are only 3 columns, one for each feature.
If you want the vocabulary to include the emoticons as well as the N most common features, you could calculate the most frequent features first, then merge them with the emoticons and re-vectorize like so:
# calculate the most frequent features first
vect = TfidfVectorizer(vocabulary=emoticons)
matrix = vect.fit_transform(traindata, max_features=10)
top_features = vect.vocabulary_
n = len(top_features)
# insert the emoticons into the vocabulary of common features
emoticons = {":)":0, ":P":1, ":(":2}
for feature, index in emoticons.items():
top_features[feature] = n + index
# re-vectorize using both sets of features
# at this point len(top_features) == 13
vect = TfidfVectorizer(vocabulary=top_features)
matrix = vect.fit_transform(traindata)
"""
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Datacratic Inc. All rights reserved.
# @Author: Alexis Tremblay
# @Email: atremblay@datacratic.com
# @Date: 2015-01-07 15:45:01
# @Last Modified by: Alexis Tremblay
# @Last Modified time: 2015-06-02 08:32:09
# @File Name: data.py
import pandas as pd
from pymldb.query import Query
from pymldb.index import Time, Index
import requests
import logging
import numpy as np
logging.basicConfig(level=logging.DEBUG)
class BatFrame(object):
    """Lazy, pandas-like frame over an MLDB dataset REST endpoint.

    Indexing, filtering and sorting accumulate state in an attached
    Query; nothing is executed until a result is materialized
    (toPandas, rows, shape, repr, ...).
    """

    def __init__(self, dataset_url):
        self.dataset_url = dataset_url
        self.query = Query(dataset_url)
        self._time = Time(dataset_url)
        self._index = Index(self)

    def __getitem__(self, val):
        """Dispatch on key type: str -> Column; Query/Column -> filtered
        copy; slice -> OFFSET/LIMIT window; list -> column subset."""
        if isinstance(val, str):
            col = Column(val, self.dataset_url)
            col.query.mergeQuery(self.query)
            return col
        elif isinstance(val, Query):
            bf = self.copy()
            # bf.query.addSELECT('*')
            bf.query.mergeQuery(val)
            return bf
        elif isinstance(val, slice):
            start = val.start
            stop = val.stop
            # step = val.step  (step is not supported)
            bf = self.copy()
            # bf.query.addSELECT('*')
            if start is not None:
                bf.query.setOFFSET(start)
            if stop is not None:
                bf.query.setLIMIT(stop)
            return bf
        elif isinstance(val, list):
            bf = self.copy()
            for value in val:
                bf.query.addSELECT("\"{}\"".format(value))
            return bf
        elif isinstance(val, Column):
            bf = self.copy()
            bf.query.addWHERE("({})".format(val.execution_name))
            return bf
        # NOTE(review): unsupported key types silently return None

    @property
    def columns(self):
        """Return the dataset's column names (parsed JSON from /columns)."""
        return requests.get(self.dataset_url + '/columns').json()

    @property
    def rows(self):
        """Return the row names of the current query result."""
        bf = self.copy()
        result = bf.query.executeQuery(format="soa")
        return result["_rowName"]

    @property
    def time(self):
        """Return a Time accessor carrying this frame's current query.

        Bug fix: previously merged `self.Query` (AttributeError -- the
        attribute is `self.query`) and returned mergeQuery()'s result;
        now mirrors the `ix` property and returns the wrapper itself.
        """
        copy_time = self._time.copy()
        copy_time.query.mergeQuery(self.query)
        return copy_time

    @property
    def ix(self):
        """Return an Index accessor bound to this frame."""
        copy_index = self._index.copy()
        return copy_index

    def copy(self):
        """Return a new BatFrame with an independent copy of the query."""
        bf = BatFrame(self.dataset_url)
        bf.query = self.query.copy()
        return bf

    def toPandas(self):
        """Execute the query and materialize it as a pandas DataFrame."""
        result = self.query.executeQuery(format="aos")
        if len(result) == 0:
            return pd.DataFrame()
        return pd.DataFrame.from_records(result, index="_rowName")

    def head(self, num_rows=5):
        """Return a copy limited to the first `num_rows` rows (lazy)."""
        bf = self.copy()
        bf.query.setLIMIT(num_rows)
        return bf

    def query(self, query):
        # NOTE(review): this method is shadowed on instances by the
        # `self.query` attribute assigned in __init__, so it is
        # unreachable through normal use.
        raise NotImplementedError()

    def sort(self, value, ascending=True):
        """Return a copy ordered by the given column(s).

        `value` and `ascending` may be scalars or equal-length lists.
        """
        bf = self.copy()
        if not isinstance(value, list):
            value = [value]
        if not isinstance(ascending, list):
            ascending = [ascending]*len(value)
        if len(value) != len(ascending):
            raise RuntimeError("len(value) != len(ascending)")
        for by, asc in zip(value, ascending):
            if asc:
                sort = "ASC"
            else:
                sort = "DESC"
            bf.query.addORDERBY("\"{}\" {}".format(by, sort))
        return bf

    @property
    def shape(self):
        """
        Returns (rowCount, valueCount)
        """
        bf = self.copy()
        content = requests.get(bf.dataset_url).json()
        rowCount = content['status']['rowCount']
        valueCount = content['status']['valueCount']
        return (rowCount, valueCount)

    def __repr__(self):
        # print the first 40 rows; the row count hint is best-effort
        bf = self.copy()
        bf.query.setLIMIT(40)
        print(bf.toPandas())
        response = requests.get(bf.dataset_url).json()
        try:
            rowCount = response['status']['rowCount']
        except KeyError:  # was a bare except; only a missing key is expected
            rowCount = None
        if rowCount is not None and rowCount > 40:
            print("{} rows".format(rowCount))
        return ""
class Column(object):
    """A single, lazily evaluated column of an MLDB dataset.

    Comparison, arithmetic and boolean operators build SQL fragments in
    an attached Query; nothing is executed until the values are
    materialized (values, toPandas, iteration, min/max, ...).
    """

    def __init__(self, name, dataset_url):
        """
        Parameters
        ----------
        name: string
            Name of the column. No check is actually done to see if the column
            exists.
        dataset_id:
            The base url where the dataset is located.
            e.g. localhost:8888/v1/datasets/<dataset_name>
        """
        logging.debug("Instanciating Column with {}".format(name))
        self.name = "\"{}\"".format(name)
        # execution_name is the SQL expression this column currently
        # represents; it diverges from `name` once operators are applied
        self.execution_name = "\"{}\"".format(name)
        self.dataset_url = dataset_url
        self.query = Query(dataset_url)
        self.query.addSELECT(self.name)

    @property
    def values(self):
        """Materialize this column as a numpy array."""
        result = self.query.executeQuery(format="soa")
        if len(result) > 2:
            raise RuntimeError("Only one column should be returned")
        colName = [x for x in result.keys() if x != "_rowName"][0]
        return np.array(result[colName])

    def __getitem__(self, val):
        """slice -> OFFSET/LIMIT window; Query -> merged filter;
        str -> single-row lookup by row name."""
        if isinstance(val, slice):
            start = val.start
            stop = val.stop
            # step = val.step  (step is not supported)
            col = self.copy()
            if start is not None:
                col.query.setOFFSET(start)
            if stop is not None:
                col.query.setLIMIT(stop)
            return col
        elif isinstance(val, Query):
            col = self.copy()
            col.query.mergeQuery(val)
            return col
        elif isinstance(val, str):
            col = self.copy()
            col.query.addWHERE("(rowName()='{}')".format(val))
            return col

    ####################
    #  Rich comparison #
    ####################
    def _comparison(self, value, operator):
        """
        Parameters
        ----------
        value: Column object or base type
            The value against which to compare the column. It can either be
            another column or a base type value (e.g. int)
        Returns
        -------
        self.query
        Notes
        -----
        Returning self.query will allow the next object to use this column
        ops and concatenate something else
        """
        if isinstance(value, Column):
            self.query.addWHERE("(({}){}({}))".format(
                self.execution_name,
                operator,
                value.execution_name))
        elif isinstance(value, str):
            # strings must be quoted in the SQL fragment
            self.query.addWHERE("(({}){}\'{}\')".format(
                self.execution_name,
                operator,
                value))
        else:
            self.query.addWHERE("(({}){}({}))".format(
                self.execution_name,
                operator,
                value))
        copy = self.copy()
        copy.query.removeSELECT("{}".format(copy.execution_name))
        return copy.query

    def __eq__(self, value):
        return self._comparison(value, '=')

    def __ne__(self, value):
        return self._comparison(value, '!=')

    def __gt__(self, value):
        return self._comparison(value, '>')

    def __ge__(self, value):
        return self._comparison(value, '>=')

    def __lt__(self, value):
        return self._comparison(value, '<')

    def __le__(self, value):
        return self._comparison(value, '<=')

    ##################################
    #  Binary arithmetic operations  #
    ##################################
    def _binary_arithmetic(self, left, binary, right):
        """
        Parameters
        ----------
        operand: Column object, integer or float
            Value on which to apply operator to this column
        binary: char
            binary arithmetic operator (-, +, *, /, ^, %)
        Returns
        -------
        self
        Notes
        -----
        Returning self will allow the next object to use this column ops and
        concatenate something else
        """
        if isinstance(right, (int, float)):
            right = right
        elif isinstance(right, Column):
            right = right.execution_name
        else:
            raise AttributeError(
                "{} can only be used ".format(binary)
                + "with integer, float or column")
        if isinstance(left, (int, float)):
            left = left
        elif isinstance(left, Column):
            left = left.execution_name
        else:
            raise AttributeError(
                "{} can only be used ".format(binary)
                + "with integer, float or column")
        copy = self.copy()
        copy.query.removeSELECT("{}".format(copy.execution_name))
        if binary == '^':  # POWER needs a different treatment
            copy.execution_name = "pow({},{})".format(left, right)
        else:
            copy.execution_name = "{}{}{}".format(left, binary, right)
        copy.query.addSELECT(copy.execution_name)
        return copy

    # Backward-compatible alias: the helper was historically misspelled.
    _binary_arithemtic = _binary_arithmetic

    def __mul__(self, value):
        return self._binary_arithmetic(self, '*', value)

    def __rmul__(self, value):
        return self._binary_arithmetic(value, '*', self)

    def __div__(self, value):
        if isinstance(value, (int, float)) and value == 0:
            raise ValueError(
                "Cannot divide by zero. "
                "Do you really want to explode the planet?")
        return self._binary_arithmetic(self, '/', value)

    def __rdiv__(self, value):
        return self._binary_arithmetic(value, '/', self)

    def __truediv__(self, value):
        if isinstance(value, (int, float)) and value == 0:
            raise ValueError(
                "Cannot divide by zero. "
                "Do you really want to explode the planet?")
        return self._binary_arithmetic(self, '/', value)

    def __rtruediv__(self, value):
        return self._binary_arithmetic(value, '/', self)

    def __sub__(self, value):
        return self._binary_arithmetic(self, '-', value)

    def __rsub__(self, value):
        return self._binary_arithmetic(value, '-', self)

    def __add__(self, value):
        return self._binary_arithmetic(self, '+', value)

    def __radd__(self, value):
        return self._binary_arithmetic(value, '+', self)

    def __pow__(self, value):
        return self._binary_arithmetic(self, '^', value)

    def __rpow__(self, value):
        return self._binary_arithmetic(value, '^', self)

    def __mod__(self, value):
        return self._binary_arithmetic(self, '%', value)

    def __rmod__(self, value):
        return self._binary_arithmetic(value, '%', self)

    def __or__(self, value):
        """Combine this column's condition with `value` using OR."""
        col = self.copy()
        left = self.execution_name
        right = value
        col.query.removeSELECT(left)
        if isinstance(right, Column):
            right = value.execution_name
            col.query.removeSELECT(right)
        elif isinstance(right, Query):
            right = right.WHERE
        col.query.addWHERE('(({}) OR ({}))'.format(left, right))
        return col.query

    def __and__(self, value):
        """Combine this column's condition with `value` using AND."""
        col = self.copy()
        left = self.execution_name
        right = value
        col.query.removeSELECT(left)
        if isinstance(right, Column):
            right = value.execution_name
            col.query.removeSELECT(right)
        elif isinstance(right, Query):
            right = right.WHERE
        col.query.addWHERE('(({}) AND ({}))'.format(left, right))
        return col.query

    def __rand__(self, value):
        """Reflected AND (operands swapped in the SQL fragment)."""
        col = self.copy()
        left = self.execution_name
        right = value
        col.query.removeSELECT(left)
        if isinstance(right, Column):
            right = value.execution_name
            col.query.removeSELECT(right)
        elif isinstance(right, Query):
            right = right.WHERE
        col.query.addWHERE('(({}) AND ({}))'.format(right, left))
        # Bug fix: the return statement was missing, so `x & col`
        # evaluated to None instead of the combined query.
        return col.query

    def __ror__(self, value):
        """Reflected OR (operands swapped in the SQL fragment)."""
        col = self.copy()
        left = self.execution_name
        right = value
        col.query.removeSELECT(left)
        if isinstance(right, Column):
            right = value.execution_name
            col.query.removeSELECT(right)
        elif isinstance(right, Query):
            right = right.WHERE
        col.query.addWHERE('(({}) OR ({}))'.format(right, left))
        return col.query

    #################################
    #  Unary arithmetic operations  #
    #################################
    def _unary_arithmetic(self, unary):
        """
        Parameters
        ----------
        unary: char
            Unary arithmetic operator (-, +) to be applied to this column
        Returns
        -------
        self
        Notes
        -----
        Returning self will allow the next object to use this column ops and
        concatenate something else
        """
        copy = self.copy()
        copy.query.removeSELECT("{}".format(copy.execution_name))
        copy.execution_name = "{}({})".format(unary, self.execution_name)
        copy.query.addSELECT(copy.execution_name)
        return copy

    def __neg__(self):
        return self._unary_arithmetic('-')

    def __pos__(self):
        raise NotImplementedError()

    def __invert__(self):
        # NOTE(review): unlike _unary_arithmetic, this does not update the
        # query's SELECT list -- confirm whether that is intentional.
        copy = self.copy()
        copy.execution_name = "NOT {}".format(copy.execution_name)
        return copy

    def __abs__(self):
        raise NotImplementedError()

    #############
    #  Casting  #
    #############
    def __float__(self):
        raise NotImplementedError()

    def __int__(self):
        raise NotImplementedError()

    def __long__(self):
        raise NotImplementedError()

    ###########
    #  Other  #
    ###########
    def __iter__(self):
        """Yield the column's values one by one (executes the query)."""
        result = self.query.executeQuery(format="soa")
        if len(result) > 2:
            raise RuntimeError("Only one column should be returned")
        colName = [x for x in result.keys() if x != "_rowName"][0]
        values = result[colName]
        i = 0
        while i < len(values):
            yield values[i]
            i += 1

    def max(self):
        """Return the column's maximum value (server-side aggregate)."""
        copy = self.copy()
        copy.query.removeSELECT("{}".format(copy.execution_name))
        copy.execution_name = "max({})".format(self.execution_name)
        copy.query.addSELECT(copy.execution_name)
        copy.query.addGROUPBY(1)
        result = copy.query.executeQuery(format="table")
        return result[1][1]

    def min(self):
        """Return the column's minimum value (server-side aggregate)."""
        copy = self.copy()
        copy.query.removeSELECT("{}".format(copy.execution_name))
        copy.execution_name = "min({})".format(self.execution_name)
        copy.query.addSELECT(copy.execution_name)
        copy.query.addGROUPBY(1)
        result = copy.query.executeQuery(format="table")
        return result[1][1]

    def copy(self):
        """Return a new Column with an independent copy of the query."""
        name = self.name[1:-1]  # strip the surrounding double quotes
        col = Column(name, self.dataset_url)
        col.execution_name = self.execution_name
        col.query = self.query.copy()
        return col

    def count(self):
        """Return number of non-NA/null observations in the Series"""
        raise NotImplementedError()

    def head(self, n=5):
        """Returns first n rows"""
        col = self.copy()
        col.query.setLIMIT(n)
        return col.toPandas()

    def isnull(self):
        raise NotImplementedError()

    def isin(self, values):
        raise NotImplementedError()

    def value_counts(self):
        raise NotImplementedError()

    def unique(self):
        """Return the distinct values of this column.

        Uses the fast /values endpoint when the column is untransformed;
        otherwise executes the current query and de-duplicates.
        """
        if self.name == self.execution_name:
            url = self.dataset_url + '/columns/{}/values'.format(
                self.name[1:-1])
            logging.debug("Getting values at {}".format(url))
            return requests.get(url).json()
        else:
            result = self.query.executeQuery(format="soa")
            if len(result) > 2:
                raise RuntimeError("Only one column should be returned")
            colName = [x for x in result.keys() if x != "_rowName"][0]
            return set(result[colName])

    def sort(self, ascending=True):
        """Return a copy ordered by this column's expression (lazy)."""
        col = self.copy()
        if ascending:
            sort = "ASC"
        else:
            sort = "DESC"
        col.query.addORDERBY("{} {}".format(col.execution_name, sort))
        return col

    def toPandas(self):
        """Execute the query and materialize this column as a Series."""
        result = self.query.executeQuery(format="soa")
        if len(result) > 2:
            raise RuntimeError("Only one column should be returned")
        colName = [x for x in result.keys() if x != "_rowName"][0]
        values = result[colName]
        rowName = result["_rowName"]
        if len(values) > 0:
            s = pd.Series(values, index=rowName)
        else:
            s = pd.Series()
        return s

    def __repr__(self):
        # print the first 40 values; the row count hint is best-effort
        col = self.copy()
        col.query.setLIMIT(40)
        print(col.toPandas())
        response = requests.get(col.dataset_url).json()
        try:
            rowCount = response['status']['rowCount']
        except KeyError:  # was a bare except; only a missing key is expected
            rowCount = None
        if rowCount is not None and rowCount > 40:
            print("{} rows".format(rowCount))
        return ""
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Antons Rebguns, Cody Jorgensen, Cara Slutter
# 2010-2011, Antons Rebguns
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns, Cody Jorgensen, Cara Slutter'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns, Cody Jorgensen, Cara Slutter'
__license__ = 'BSD'
__maintainer__ = 'Antons Rebguns'
__email__ = 'anton@email.arizona.edu'
from threading import Thread, Lock
import sys
import rospy
from dynamixel_driver.dynamixel_serial_proxy import SerialProxy
from diagnostic_msgs.msg import DiagnosticArray
from diagnostic_msgs.msg import DiagnosticStatus
from diagnostic_msgs.msg import KeyValue
from dynamixel_controllers.srv import StartController
from dynamixel_controllers.srv import StartControllerResponse
from dynamixel_controllers.srv import StopController
from dynamixel_controllers.srv import StopControllerResponse
from dynamixel_controllers.srv import RestartController
from dynamixel_controllers.srv import RestartControllerResponse
class ControllerManager:
def __init__(self):
rospy.init_node('dynamixel_controller_manager', anonymous=True)
rospy.on_shutdown(self.on_shutdown)
self.waiting_meta_controllers = []
self.controllers = {}
self.serial_proxies = {}
self.diagnostics_rate = rospy.get_param('~diagnostics_rate', 1)
self.start_controller_lock = Lock()
self.stop_controller_lock = Lock()
manager_namespace = rospy.get_param('~namespace')
serial_ports = rospy.get_param('~serial_ports')
for port_namespace,port_config in serial_ports.items():
port_name = port_config['port_name']
baud_rate = port_config['baud_rate']
readback_echo = port_config['readback_echo'] if 'readback_echo' in port_config else False
min_motor_id = port_config['min_motor_id'] if 'min_motor_id' in port_config else 0
max_motor_id = port_config['max_motor_id'] if 'max_motor_id' in port_config else 253
update_rate = port_config['update_rate'] if 'update_rate' in port_config else 5
error_level_temp = 75
warn_level_temp = 70
if 'diagnostics' in port_config:
if 'error_level_temp' in port_config['diagnostics']:
error_level_temp = port_config['diagnostics']['error_level_temp']
if 'warn_level_temp' in port_config['diagnostics']:
warn_level_temp = port_config['diagnostics']['warn_level_temp']
serial_proxy = SerialProxy(port_name,
port_namespace,
baud_rate,
min_motor_id,
max_motor_id,
update_rate,
self.diagnostics_rate,
error_level_temp,
warn_level_temp,
readback_echo)
serial_proxy.connect()
# will create a set of services for each serial port under common manager namesapce
# e.g. /dynamixel_manager/robot_arm_port/start_controller
# /dynamixel_manager/robot_head_port/start_controller
# where 'dynamixel_manager' is manager's namespace
# 'robot_arm_port' and 'robot_head_port' are human readable names for serial ports
rospy.Service('%s/%s/start_controller' % (manager_namespace, port_namespace), StartController, self.start_controller)
rospy.Service('%s/%s/stop_controller' % (manager_namespace, port_namespace), StopController, self.stop_controller)
rospy.Service('%s/%s/restart_controller' % (manager_namespace, port_namespace), RestartController, self.restart_controller)
self.serial_proxies[port_namespace] = serial_proxy
# services for 'meta' controllers, e.g. joint trajectory controller
# these controllers don't have their own serial port, instead they rely
# on regular controllers for serial connection. The advantage of meta
# controller is that it can pack commands for multiple motors on multiple
# serial ports.
# NOTE: all serial ports that meta controller needs should be managed by
# the same controler manager.
rospy.Service('%s/meta/start_controller' % manager_namespace, StartController, self.start_controller)
rospy.Service('%s/meta/stop_controller' % manager_namespace, StopController, self.stop_controller)
rospy.Service('%s/meta/restart_controller' % manager_namespace, RestartController, self.restart_controller)
self.diagnostics_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=10)
if self.diagnostics_rate > 0: Thread(target=self.diagnostics_processor).start()
def on_shutdown(self):
for serial_proxy in self.serial_proxies.values():
serial_proxy.disconnect()
def diagnostics_processor(self):
diag_msg = DiagnosticArray()
rate = rospy.Rate(self.diagnostics_rate)
while not rospy.is_shutdown():
diag_msg.status = []
diag_msg.header.stamp = rospy.Time.now()
for controller in self.controllers.values():
try:
joint_state = controller.joint_state
temps = joint_state.motor_temps
max_temp = max(temps)
status = DiagnosticStatus()
status.name = 'Joint Controller (%s)' % controller.joint_name
status.hardware_id = 'Robotis Dynamixel %s on port %s' % (str(joint_state.motor_ids), controller.port_namespace)
status.values.append(KeyValue('Goal', str(joint_state.goal_pos)))
status.values.append(KeyValue('Position', str(joint_state.current_pos)))
status.values.append(KeyValue('Error', str(joint_state.error)))
status.values.append(KeyValue('Velocity', str(joint_state.velocity)))
status.values.append(KeyValue('Load', str(joint_state.load)))
status.values.append(KeyValue('Moving', str(joint_state.is_moving)))
status.values.append(KeyValue('Temperature', str(max_temp)))
status.level = DiagnosticStatus.OK
status.message = 'OK'
diag_msg.status.append(status)
except:
pass
self.diagnostics_pub.publish(diag_msg)
rate.sleep()
def check_deps(self):
controllers_still_waiting = []
for i,(controller_name,deps,kls) in enumerate(self.waiting_meta_controllers):
if not set(deps).issubset(self.controllers.keys()):
controllers_still_waiting.append(self.waiting_meta_controllers[i])
rospy.logwarn('[%s] not all dependencies started, still waiting for %s...' % (controller_name, str(list(set(deps).difference(self.controllers.keys())))))
else:
dependencies = [self.controllers[dep_name] for dep_name in deps]
controller = kls(controller_name, dependencies)
if controller.initialize():
controller.start()
self.controllers[controller_name] = controller
self.waiting_meta_controllers = controllers_still_waiting[:]
def start_controller(self, req):
port_name = req.port_name
package_path = req.package_path
module_name = req.module_name
class_name = req.class_name
controller_name = req.controller_name
self.start_controller_lock.acquire()
if controller_name in self.controllers:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Controller [%s] already started. If you want to restart it, call restart.' % controller_name)
try:
if module_name not in sys.modules:
# import if module not previously imported
package_module = __import__(package_path, globals(), locals(), [module_name], -1)
else:
# reload module if previously imported
package_module = reload(sys.modules[package_path])
controller_module = getattr(package_module, module_name)
except ImportError, ie:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Cannot find controller module. Unable to start controller %s\n%s' % (module_name, str(ie)))
except SyntaxError, se:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Syntax error in controller module. Unable to start controller %s\n%s' % (module_name, str(se)))
except Exception, e:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Unknown error has occured. Unable to start controller %s\n%s' % (module_name, str(e)))
kls = getattr(controller_module, class_name)
if port_name == 'meta':
self.waiting_meta_controllers.append((controller_name,req.dependencies,kls))
self.check_deps()
self.start_controller_lock.release()
return StartControllerResponse(True, '')
if port_name != 'meta' and (port_name not in self.serial_proxies):
self.start_controller_lock.release()
return StartControllerResponse(False, 'Specified port [%s] not found, available ports are %s. Unable to start controller %s' % (port_name, str(self.serial_proxies.keys()), controller_name))
controller = kls(self.serial_proxies[port_name].dxl_io, controller_name, port_name)
if controller.initialize():
controller.start()
self.controllers[controller_name] = controller
self.check_deps()
self.start_controller_lock.release()
return StartControllerResponse(True, 'Controller %s successfully started.' % controller_name)
else:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Initialization failed. Unable to start controller %s' % controller_name)
def stop_controller(self, req):
controller_name = req.controller_name
self.stop_controller_lock.acquire()
if controller_name in self.controllers:
self.controllers[controller_name].stop()
del self.controllers[controller_name]
self.stop_controller_lock.release()
return StopControllerResponse(True, 'controller %s successfully stopped.' % controller_name)
else:
self.self.stop_controller_lock.release()
return StopControllerResponse(False, 'controller %s was not running.' % controller_name)
def restart_controller(self, req):
response1 = self.stop_controller(StopController(req.controller_name))
response2 = self.start_controller(req)
return RestartControllerResponse(response1.success and response2.success, '%s\n%s' % (response1.reason, response2.reason))
if __name__ == '__main__':
    # Start the manager node and block until ROS shuts down;
    # a Ctrl-C / node shutdown is expected, not an error.
    try:
        ControllerManager()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
| |
from future import standard_library
standard_library.install_aliases()
from builtins import str
from configparser import ConfigParser
import errno
import logging
import os
import sys
import textwrap
# cryptography is an optional dependency: generate_fernet_key() below
# falls back to a plain-text sentinel when `Fernet` was never imported.
try:
    from cryptography.fernet import Fernet
except ImportError:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should be ignored here.
    pass
def generate_fernet_key():
    """Return a fresh Fernet key string, or a plain-text sentinel when the
    optional cryptography package is unavailable (`Fernet` undefined)."""
    try:
        key = Fernet.generate_key().decode()
    except NameError:
        # cryptography missing: the Fernet import above never succeeded
        key = "cryptography_not_found_storing_passwords_in_plain_text"
    return key
def expand_env_var(env_var):
    """
    Expands (potentially nested) env vars by repeatedly applying
    `expandvars` and `expanduser` until interpolation stops having
    any effect.

    Bug fix: None (and other falsy values) are now returned unchanged;
    previously the value was coerced via str(), so passing None came
    back as the string "None".
    """
    if not env_var:
        return env_var
    while True:
        interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
        if interpolated == env_var:
            # a fixed point: no further interpolation possible
            return interpolated
        else:
            env_var = interpolated
class AirflowConfigException(Exception):
    """Raised for configuration problems: missing section/key, malformed
    boolean values, or failure to create the airflow home directory."""
    pass
# Hard-coded fallback values: ConfigParserWithDefaults.get() consults this
# mapping only when a key is found neither in the environment nor in the
# parsed config file.  Sections/options mirror DEFAULT_CONFIG below.
defaults = {
    'core': {
        'unit_test_mode': False,
        'parallelism': 32,
        'load_examples': True,
        'plugins_folder': None,
        'security': None,
        'donot_pickle': False,
    },
    'webserver': {
        'base_url': 'http://localhost:8080',
        'web_server_host': '0.0.0.0',
        'web_server_port': '8080',
        'authenticate': False,
        'filter_by_owner': False,
        'demo_mode': False,
        'secret_key': 'airflowified',
        'expose_config': False,
        'threads': 4,
    },
    'scheduler': {
        'statsd_on': False,
        'statsd_host': 'localhost',
        'statsd_port': 8125,
        'statsd_prefix': 'airflow',
        'job_heartbeat_sec': 5,
        'scheduler_heartbeat_sec': 60,
        'authenticate': False,
    },
    'celery': {
        'default_queue': 'default',
        'flower_port': '5555'
    },
    'smtp': {
        'smtp_starttls': True,
    },
    'kerberos': {
        'ccache': '/tmp/airflow_krb5_ccache',
        'principal': 'airflow',  # gets augmented with fqdn
        'reinit_frequency': '3600',
        'kinit_path': 'kinit',
        'keytab': 'airflow.keytab',
    }
}
# Template for the config file generated on first run when AIRFLOW_CONFIG is
# missing.  {AIRFLOW_HOME} and {FERNET_KEY} are substituted via .format().
# BUG FIX: the webserver option was written as `thread = 4` while the code
# defaults (and therefore lookups) use the key `threads` -- the generated
# setting was dead.  Renamed to `threads`.
# NOTE(review): scheduler_heartbeat_sec here (5) differs from the hard-coded
# default (60) -- confirm which value is intended.
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
base_log_folder = {AIRFLOW_HOME}/logs
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
# Whether to disable pickling dags
donot_pickle = False
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is use in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Secret key used to run your flask app
secret_key = temporary_key
# number of threads to run the Gunicorn web server
threads = 4
# Expose the configuration file in the web server
expose_config = true
# Set to true to turn on authentication : http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
[smtp]
# If you want airflow to send emails on retries, failure, and you want to
# the airflow.utils.send_email function, you have to configure an smtp
# server here
smtp_host = localhost
smtp_starttls = True
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers local log files to the airflow main
# web server, who then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and open
# visible from the main web server to connect into the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that worker listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# Statsd (https://github.com/etsy/statsd) integration settings
# statsd_on = False
# statsd_host = localhost
# statsd_port = 8125
# statsd_prefix = airflow
[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050
# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow
# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1
# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256
# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False
# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False
# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin
"""
# Minimal configuration written to <AIRFLOW_HOME>/unittests.cfg and used by
# test_mode(); only {AIRFLOW_HOME} is substituted via .format().
TEST_CONFIG = """\
[core]
airflow_home = {AIRFLOW_HOME}
dags_folder = {AIRFLOW_HOME}/dags
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
unit_test_mode = True
load_examples = True
donot_pickle = False
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
"""
class ConfigParserWithDefaults(ConfigParser):
    """ConfigParser that resolves options from three layers.

    Lookup order in get(): environment variables of the form
    AIRFLOW__{SECTION}__{KEY} (note the double underscores), then the parsed
    config file, then the `defaults` mapping handed to the constructor.
    Every resolved value is passed through expand_env_var().
    """

    def __init__(self, defaults, *args, **kwargs):
        self.defaults = defaults
        ConfigParser.__init__(self, *args, **kwargs)

    def get(self, section, key):
        section = str(section).lower()
        key = str(key).lower()

        # Environment variables take precedence over everything else.
        env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
        if env_var in os.environ:
            return expand_env_var(os.environ[env_var])

        # ...then the config file...
        if self.has_option(section, key):
            return expand_env_var(ConfigParser.get(self, section, key))

        # ...then the hard-coded defaults.
        d = self.defaults
        if section in d and key in d[section]:
            return expand_env_var(d[section][key])

        raise AirflowConfigException(
            "section/key [{section}/{key}] not found "
            "in config".format(**locals()))

    def getboolean(self, section, key):
        raw = str(self.get(section, key)).lower().strip()
        # Tolerate trailing inline comments, e.g. "True  # reason".
        if '#' in raw:
            raw = raw.split('#')[0].strip()
        if raw == "true":
            return True
        if raw == "false":
            return False
        raise AirflowConfigException("Not a boolean.")

    def getint(self, section, key):
        return int(self.get(section, key))
def mkdir_p(path):
    """Create *path* (and any missing parents), like ``mkdir -p``.

    An already-existing directory is not an error; any other OS failure is
    reported as an AirflowConfigException.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        already_exists = exc.errno == errno.EEXIST and os.path.isdir(path)
        if not already_exists:
            raise AirflowConfigException('Had trouble creating a directory')
"""
Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
"~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
"""
if 'AIRFLOW_HOME' not in os.environ:
AIRFLOW_HOME = expand_env_var('~/airflow')
else:
AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
if 'AIRFLOW_CONFIG' not in os.environ:
if os.path.isfile(expand_env_var('~/airflow.cfg')):
AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
else:
AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
# Generate the main config file on first run.  These generated options are
# only a starting point: the right way to change your configuration is to
# alter the configuration file, not this code.
if not os.path.isfile(AIRFLOW_CONFIG):
    FERNET_KEY = generate_fernet_key()
    logging.info("Creating new config file in: " + AIRFLOW_CONFIG)
    # `with` guarantees the handle is closed even if formatting/writing
    # fails (the original open/write/close could leak the file handle).
    with open(AIRFLOW_CONFIG, 'w') as f:
        # locals() at module level supplies AIRFLOW_HOME and FERNET_KEY
        # for the template's {placeholders}.
        f.write(DEFAULT_CONFIG.format(**locals()))
# Likewise generate the unit-test config next to it.
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
if not os.path.isfile(TEST_CONFIG_FILE):
    logging.info("Creating new config file in: " + TEST_CONFIG_FILE)
    with open(TEST_CONFIG_FILE, 'w') as f:
        f.write(TEST_CONFIG.format(**locals()))
# Announce which config file the module-level parser below will consume.
logging.info("Reading the config from " + AIRFLOW_CONFIG)
def test_mode():
    """Switch the module-level ``conf`` over to the unit-test configuration.

    BUG FIX: the original bound a fresh parser to a *local* variable, so the
    module-level ``conf`` was never replaced, and it passed the TEST_CONFIG
    template *string* to ``read()``, which expects a file name -- the
    function therefore had no effect at all.
    """
    global conf
    conf = ConfigParserWithDefaults(defaults)
    conf.read(TEST_CONFIG_FILE)
# The module-level parser every other Airflow module consults.
conf = ConfigParserWithDefaults(defaults)
conf.read(AIRFLOW_CONFIG)
# Warn users whose environment could encrypt connection passwords
# (cryptography imported successfully) but whose config lacks a fernet_key.
if 'cryptography' in sys.modules and not conf.has_option('core', 'fernet_key'):
    logging.warning(textwrap.dedent("""
        Your system supports encrypted passwords for Airflow connections but is
        currently storing them in plaintext! To turn on encryption, add a
        "fernet_key" option to the "core" section of your airflow.cfg file,
        like this:
            [core]
            fernet_key = <YOUR FERNET KEY>
        Your airflow.cfg file is located at: {cfg}.
        If you need to generate a fernet key, you can run this code:
            from airflow.configuration import generate_fernet_key
            generate_fernet_key()
        """.format(cfg=AIRFLOW_CONFIG)))
| |
# Copyright (c) 2012-2014 Jonathan Warren
# Copyright (c) 2012-2014 The Bitmessage developers
comment= """
This is not what you run to run the Bitmessage API. Instead, enable the API
( https://bitmessage.org/wiki/API ) and optionally enable daemon mode
( https://bitmessage.org/wiki/Daemon ) then run bitmessagemain.py.
"""
if __name__ == "__main__":
print comment
import sys
sys.exit(0)
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import json
import shared
import time
from addresses import decodeAddress,addBMIfNotPresent,decodeVarint,calculateInventoryHash,varintDecodeError
import helper_inbox
import helper_sent
import hashlib
from pyelliptic.openssl import OpenSSL
from struct import pack
# Classes
from helper_sql import sqlQuery,sqlExecute,SqlBulkExecute
from debug import logger
# Helper Functions
import proofofwork
# Label prefix applied to chan addresses created/joined through the API.
str_chan = '[chan]'
class APIError(Exception):
    """Exception raised by API handlers, carrying a numeric error code
    alongside a human-readable message."""

    def __init__(self, error_number, error_message):
        super(APIError, self).__init__()
        self.error_number = error_number
        self.error_message = error_message

    def __str__(self):
        return "API Error %04i: %s" % (self.error_number, self.error_message)
# This is one of several classes that constitute the API
# This class was written by Vaibhav Bhatia. Modified by Jonathan Warren (Atheros).
# http://code.activestate.com/recipes/501148-xmlrpc-serverclient-which-does-cookie-handling-and/
class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
def do_POST(self):
    """Handle an HTTP POST by interpreting the body as an XML-RPC call.

    Same flow as SimpleXMLRPCRequestHandler.do_POST, hacked so that any
    cookies collected on self.cookies are emitted with the response.
    """
    # Check that the path is legal
    if not self.is_rpc_path_valid():
        self.report_404()
        return
    try:
        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10 * 1024 * 1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            L.append(self.rfile.read(chunk_size))
            size_remaining -= len(L[-1])
        data = ''.join(L)
        # In previous versions of SimpleXMLRPCServer, _dispatch
        # could be overridden in this class, instead of in
        # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
        # check to see if a subclass implements _dispatch and dispatch
        # using that method if present.
        response = self.server._marshaled_dispatch(
            data, getattr(self, '_dispatch', None)
        )
    except: # This should only happen if the module is buggy
        # internal error, report as HTTP server error
        self.send_response(500)
        self.end_headers()
    else:
        # got a valid XML RPC response
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.send_header("Content-length", str(len(response)))
        # HACK :start -> sends cookies here
        if self.cookies:
            for cookie in self.cookies:
                self.send_header('Set-Cookie', cookie.output(header=''))
        # HACK :end
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
def APIAuthenticateClient(self):
    """Validate the request's HTTP Basic credentials against the configured
    apiusername/apipassword; return True only on an exact match.

    A request lacking an Authorization header is logged and penalised with
    a 2 second sleep to slow down brute-force probing.
    """
    if 'Authorization' in self.headers:
        # Header format: "Basic <base64(user:password)>" (py2 str codec).
        (enctype, encstr) = self.headers.get('Authorization').split()
        (emailid, password) = encstr.decode('base64').split(':')
        # BUG FIX: the original had a dead `return False` after this
        # if/else (every branch already returned); removed, and the inner
        # if/else collapsed into one boolean expression.
        return (emailid == shared.config.get('bitmessagesettings', 'apiusername')
                and password == shared.config.get('bitmessagesettings', 'apipassword'))
    else:
        logger.warn('Authentication failed because header lacks Authentication field')
        time.sleep(2)
        return False
def _decode(self, text, decode_type):
    """Decode *text* with the given codec, translating any failure into
    APIError 22 so callers see a uniform error for malformed input."""
    try:
        decoded = text.decode(decode_type)
    except Exception as e:
        raise APIError(22, "Decode error - " + str(e) + ". Had trouble while decoding string: " + repr(text))
    return decoded
def _verifyAddress(self, address):
    """Decode *address* and reject anything this API cannot serve.

    Returns the (status, addressVersionNumber, streamNumber, ripe) tuple
    from decodeAddress on success; raises APIError otherwise.
    """
    status, addressVersionNumber, streamNumber, ripe = decodeAddress(address)
    if status != 'success':
        logger.warn('API Error 0007: Could not decode address %s. Status: %s.', address, status)
        # Map the specific decode failures onto their dedicated API codes;
        # anything else falls through to the generic error 7.
        specific_errors = {
            'checksumfailed': (8, 'Checksum failed for address: ' + address),
            'invalidcharacters': (9, 'Invalid characters in address: ' + address),
            'versiontoohigh': (10, 'Address version number too high (or zero) in address: ' + address),
            'varintmalformed': (26, 'Malformed varint in address: ' + address),
        }
        if status in specific_errors:
            code, message = specific_errors[status]
            raise APIError(code, message)
        raise APIError(7, 'Could not decode address: ' + address + ' : ' + status)
    if addressVersionNumber < 2 or addressVersionNumber > 4:
        raise APIError(11, 'The address version number currently must be 2, 3 or 4. Others aren\'t supported. Check the address.')
    if streamNumber != 1:
        raise APIError(12, 'The stream number must be 1. Others aren\'t supported. Check the address.')
    return (status, addressVersionNumber, streamNumber, ripe)
# Request handlers (each maps one API method onto the backing SQL/queues)
def HandleListAddresses(self, method):
    """Return every owned identity (all keys.dat sections except the
    settings section) as a JSON string.

    For method == 'listAddresses2' labels are base64-encoded (py2 str
    codec); otherwise they are included raw.
    """
    data = '{"addresses":['
    configSections = shared.config.sections()
    for addressInKeysFile in configSections:
        if addressInKeysFile != 'bitmessagesettings':
            status, addressVersionNumber, streamNumber, hash01 = decodeAddress(
                addressInKeysFile)
            # len > 20 means at least one entry was appended already,
            # so a separating comma is needed.
            if len(data) > 20:
                data += ','
            if shared.config.has_option(addressInKeysFile, 'chan'):
                chan = shared.config.getboolean(addressInKeysFile, 'chan')
            else:
                chan = False
            label = shared.config.get(addressInKeysFile, 'label')
            if method == 'listAddresses2':
                label = label.encode('base64')
            data += json.dumps({'label': label, 'address': addressInKeysFile, 'stream':
                                streamNumber, 'enabled': shared.config.getboolean(addressInKeysFile, 'enabled'), 'chan': chan}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleListAddressBookEntries(self, params):
    """Return every address-book row as a JSON string; labels are
    base64-encoded (py2 str codec)."""
    rows = sqlQuery('''SELECT label, address from addressbook''')
    data = '{"addresses":['
    for label, address in rows:
        label = shared.fixPotentiallyInvalidUTF8Data(label)
        # A comma is needed once at least one entry has been appended.
        if len(data) > 20:
            data += ','
        data += json.dumps({'label': label.encode('base64'), 'address': address}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleAddAddressBookEntry(self, params):
    """Insert (label, address) into the address book.

    params: (address, base64-encoded label).  Raises APIError 16 when the
    address is already present; pushes UI re-render signals on success.
    """
    if len(params) != 2:
        raise APIError(0, "I need label and address")
    address, label = params
    label = self._decode(label, "base64")
    address = addBMIfNotPresent(address)
    self._verifyAddress(address)
    # Reject duplicates before inserting.
    queryreturn = sqlQuery("SELECT address FROM addressbook WHERE address=?", address)
    if queryreturn != []:
        raise APIError(16, 'You already have this address in your address book.')
    sqlExecute("INSERT INTO addressbook VALUES(?,?)", label, address)
    shared.UISignalQueue.put(('rerenderInboxFromLabels',''))
    shared.UISignalQueue.put(('rerenderSentToLabels',''))
    shared.UISignalQueue.put(('rerenderAddressBook',''))
    return "Added address %s to address book" % address
def HandleDeleteAddressBookEntry(self, params):
    """Delete an address from the address book (no error if it was absent)
    and refresh the UI."""
    if len(params) != 1:
        raise APIError(0, "I need an address")
    address, = params
    address = addBMIfNotPresent(address)
    self._verifyAddress(address)
    sqlExecute('DELETE FROM addressbook WHERE address=?', address)
    for signal_name in ('rerenderInboxFromLabels', 'rerenderSentToLabels', 'rerenderAddressBook'):
        shared.UISignalQueue.put((signal_name, ''))
    return "Deleted address book entry for %s if it existed" % address
def HandleCreateRandomAddress(self, params):
    """Create one random (non-deterministic) v4 address.

    params: (label[, eighteenByteRipe[, totalDifficulty[, smallMessageDifficulty]]]).
    The label is base64-encoded; the difficulty multipliers scale the
    network-default proof-of-work parameters.  Blocks until the address
    generator thread replies and returns its result.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    elif len(params) == 1:
        label, = params
        eighteenByteRipe = False
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 2:
        label, eighteenByteRipe = params
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 3:
        label, eighteenByteRipe, totalDifficulty = params
        nonceTrialsPerByte = int(
            shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 4:
        label, eighteenByteRipe, totalDifficulty, smallMessageDifficulty = params
        nonceTrialsPerByte = int(
            shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
        payloadLengthExtraBytes = int(
            shared.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
    else:
        raise APIError(0, 'Too many parameters!')
    label = self._decode(label, "base64")
    try:
        # py2 `unicode`: only used to verify the label is valid UTF-8.
        unicode(label, 'utf-8')
    except:
        raise APIError(17, 'Label is not valid UTF-8 data.')
    shared.apiAddressGeneratorReturnQueue.queue.clear()
    streamNumberForAddress = 1
    shared.addressGeneratorQueue.put((
        'createRandomAddress', 4, streamNumberForAddress, label, 1, "", eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes))
    return shared.apiAddressGeneratorReturnQueue.get()
def HandleCreateDeterministicAddresses(self, params):
    """Create one or more deterministic addresses from a passphrase.

    params: (passphrase[, numberOfAddresses[, addressVersionNumber[,
    streamNumber[, eighteenByteRipe[, totalDifficulty[,
    smallMessageDifficulty]]]]]]).  Passphrase is base64-encoded; 0 for
    version/stream means auto-select.  Returns a JSON list of the new
    addresses.

    BUG FIX: the version-number error message concatenated the int
    directly onto a str, raising TypeError instead of the intended
    APIError(2); the value is now wrapped in str().
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    elif len(params) == 1:
        passphrase, = params
        numberOfAddresses = 1
        addressVersionNumber = 0
        streamNumber = 0
        eighteenByteRipe = False
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 2:
        passphrase, numberOfAddresses = params
        addressVersionNumber = 0
        streamNumber = 0
        eighteenByteRipe = False
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 3:
        passphrase, numberOfAddresses, addressVersionNumber = params
        streamNumber = 0
        eighteenByteRipe = False
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 4:
        passphrase, numberOfAddresses, addressVersionNumber, streamNumber = params
        eighteenByteRipe = False
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 5:
        passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe = params
        nonceTrialsPerByte = shared.config.get(
            'bitmessagesettings', 'defaultnoncetrialsperbyte')
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 6:
        passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe, totalDifficulty = params
        nonceTrialsPerByte = int(
            shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
        payloadLengthExtraBytes = shared.config.get(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes')
    elif len(params) == 7:
        passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe, totalDifficulty, smallMessageDifficulty = params
        nonceTrialsPerByte = int(
            shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
        payloadLengthExtraBytes = int(
            shared.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
    else:
        raise APIError(0, 'Too many parameters!')
    if len(passphrase) == 0:
        raise APIError(1, 'The specified passphrase is blank.')
    if not isinstance(eighteenByteRipe, bool):
        raise APIError(23, 'Bool expected in eighteenByteRipe, saw %s instead' % type(eighteenByteRipe))
    passphrase = self._decode(passphrase, "base64")
    if addressVersionNumber == 0:  # 0 means "just use the proper addressVersionNumber"
        addressVersionNumber = 4
    if addressVersionNumber != 3 and addressVersionNumber != 4:
        raise APIError(2, 'The address version number currently must be 3, 4, or 0 (which means auto-select). ' + str(addressVersionNumber) + ' isn\'t supported.')
    if streamNumber == 0:  # 0 means "just use the most available stream"
        streamNumber = 1
    if streamNumber != 1:
        raise APIError(3, 'The stream number must be 1 (or 0 which means auto-select). Others aren\'t supported.')
    if numberOfAddresses == 0:
        raise APIError(4, 'Why would you ask me to generate 0 addresses for you?')
    if numberOfAddresses > 999:
        raise APIError(5, 'You have (accidentally?) specified too many addresses to make. Maximum 999. This check only exists to prevent mischief; if you really want to create more addresses than this, contact the Bitmessage developers and we can modify the check or you can do it yourself by searching the source code for this message.')
    shared.apiAddressGeneratorReturnQueue.queue.clear()
    logger.debug('Requesting that the addressGenerator create %s addresses.', numberOfAddresses)
    shared.addressGeneratorQueue.put(
        ('createDeterministicAddresses', addressVersionNumber, streamNumber,
         'unused API address', numberOfAddresses, passphrase, eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes))
    data = '{"addresses":['
    queueReturn = shared.apiAddressGeneratorReturnQueue.get()
    for item in queueReturn:
        if len(data) > 20:
            data += ','
        data += "\"" + item + "\""
    data += ']}'
    return data
def HandleGetDeterministicAddress(self, params):
    """Compute (without storing) the deterministic address for an exact
    (passphrase, addressVersionNumber, streamNumber) triple and return it.

    BUG FIX: the version-number error message concatenated the int
    directly onto a str, raising TypeError instead of the intended
    APIError(2); the value is now wrapped in str().
    """
    if len(params) != 3:
        raise APIError(0, 'I need exactly 3 parameters.')
    passphrase, addressVersionNumber, streamNumber = params
    numberOfAddresses = 1
    eighteenByteRipe = False
    if len(passphrase) == 0:
        raise APIError(1, 'The specified passphrase is blank.')
    passphrase = self._decode(passphrase, "base64")
    if addressVersionNumber != 3 and addressVersionNumber != 4:
        raise APIError(2, 'The address version number currently must be 3 or 4. ' + str(addressVersionNumber) + ' isn\'t supported.')
    if streamNumber != 1:
        raise APIError(3, ' The stream number must be 1. Others aren\'t supported.')
    shared.apiAddressGeneratorReturnQueue.queue.clear()
    logger.debug('Requesting that the addressGenerator create %s addresses.', numberOfAddresses)
    shared.addressGeneratorQueue.put(
        ('getDeterministicAddress', addressVersionNumber,
         streamNumber, 'unused API address', numberOfAddresses, passphrase, eighteenByteRipe))
    return shared.apiAddressGeneratorReturnQueue.get()
def HandleCreateChan(self, params):
    """Create a new chan from a base64-encoded passphrase and return its
    address.  Raises APIError 24 when the chan already exists locally.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters.')
    elif len(params) == 1:
        passphrase, = params
    else:
        # BUG FIX: extra parameters previously fell through with
        # `passphrase` unbound, producing an UnboundLocalError instead of
        # a clean API error (consistent with the other handlers).
        raise APIError(0, 'Too many parameters!')
    passphrase = self._decode(passphrase, "base64")
    if len(passphrase) == 0:
        raise APIError(1, 'The specified passphrase is blank.')
    # It would be nice to make the label the passphrase but it is
    # possible that the passphrase contains non-utf-8 characters.
    try:
        unicode(passphrase, 'utf-8')
        label = str_chan + ' ' + passphrase
    except:
        label = str_chan + ' ' + repr(passphrase)
    addressVersionNumber = 4
    streamNumber = 1
    shared.apiAddressGeneratorReturnQueue.queue.clear()
    logger.debug('Requesting that the addressGenerator create chan %s.', passphrase)
    shared.addressGeneratorQueue.put(('createChan', addressVersionNumber, streamNumber, label, passphrase))
    queueReturn = shared.apiAddressGeneratorReturnQueue.get()
    if len(queueReturn) == 0:
        raise APIError(24, 'Chan address is already present.')
    address = queueReturn[0]
    return address
def HandleJoinChan(self, params):
    """Join an existing chan given its base64 passphrase and address.

    Raises APIError 18 when the passphrase does not match the address,
    APIError 24 when the chan is already present.
    """
    if len(params) < 2:
        raise APIError(0, 'I need two parameters.')
    elif len(params) == 2:
        passphrase, suppliedAddress = params
    else:
        # BUG FIX: >2 parameters previously left passphrase/suppliedAddress
        # unbound -> UnboundLocalError rather than a clean API error.
        raise APIError(0, 'Too many parameters!')
    passphrase = self._decode(passphrase, "base64")
    if len(passphrase) == 0:
        raise APIError(1, 'The specified passphrase is blank.')
    # It would be nice to make the label the passphrase but it is
    # possible that the passphrase contains non-utf-8 characters.
    try:
        unicode(passphrase, 'utf-8')
        label = str_chan + ' ' + passphrase
    except:
        label = str_chan + ' ' + repr(passphrase)
    status, addressVersionNumber, streamNumber, toRipe = self._verifyAddress(suppliedAddress)
    suppliedAddress = addBMIfNotPresent(suppliedAddress)
    shared.apiAddressGeneratorReturnQueue.queue.clear()
    shared.addressGeneratorQueue.put(('joinChan', suppliedAddress, label, passphrase))
    addressGeneratorReturnValue = shared.apiAddressGeneratorReturnQueue.get()
    if addressGeneratorReturnValue == 'chan name does not match address':
        raise APIError(18, 'Chan name does not match address.')
    if len(addressGeneratorReturnValue) == 0:
        raise APIError(24, 'Chan address is already present.')
    #TODO: this variable is not used to anything
    createdAddress = addressGeneratorReturnValue[0] # in case we ever want it for anything.
    return "success"
def HandleLeaveChan(self, params):
    """Remove a chan address from keys.dat and persist the file.

    Refuses (APIError 25) to delete a non-chan identity.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters.')
    elif len(params) == 1:
        address, = params
    else:
        # BUG FIX: extra parameters previously left `address` unbound ->
        # UnboundLocalError rather than a clean API error.
        raise APIError(0, 'Too many parameters!')
    status, addressVersionNumber, streamNumber, toRipe = self._verifyAddress(address)
    address = addBMIfNotPresent(address)
    if not shared.config.has_section(address):
        raise APIError(13, 'Could not find this address in your keys.dat file.')
    if not shared.safeConfigGetBoolean(address, 'chan'):
        raise APIError(25, 'Specified address is not a chan address. Use deleteAddress API call instead.')
    shared.config.remove_section(address)
    with open(shared.appdata + 'keys.dat', 'wb') as configfile:
        shared.config.write(configfile)
    return 'success'
def HandleDeleteAddress(self, params):
    """Delete an owned identity from keys.dat, persist the file, and
    refresh the UI and in-memory key hashes."""
    if len(params) == 0:
        raise APIError(0, 'I need parameters.')
    elif len(params) == 1:
        address, = params
    else:
        # BUG FIX: extra parameters previously left `address` unbound ->
        # UnboundLocalError rather than a clean API error.
        raise APIError(0, 'Too many parameters!')
    status, addressVersionNumber, streamNumber, toRipe = self._verifyAddress(address)
    address = addBMIfNotPresent(address)
    if not shared.config.has_section(address):
        raise APIError(13, 'Could not find this address in your keys.dat file.')
    shared.config.remove_section(address)
    with open(shared.appdata + 'keys.dat', 'wb') as configfile:
        shared.config.write(configfile)
    shared.UISignalQueue.put(('rerenderInboxFromLabels',''))
    shared.UISignalQueue.put(('rerenderSentToLabels',''))
    shared.reloadMyAddressHashes()
    return 'success'
def HandleGetAllInboxMessages(self, params):
    """Return every inbox-folder message as a JSON string, ordered by
    received time.  msgids are hex-encoded; subject/message are
    base64-encoded (py2 str codecs).
    """
    queryreturn = sqlQuery(
        '''SELECT msgid, toaddress, fromaddress, subject, received, message, encodingtype, read FROM inbox where folder='inbox' ORDER BY received''')
    data = '{"inboxMessages":['
    for row in queryreturn:
        msgid, toAddress, fromAddress, subject, received, message, encodingtype, read = row
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        # len > 25 means at least one message has been appended already,
        # so a separating comma is needed.
        if len(data) > 25:
            data += ','
        data += json.dumps({'msgid': msgid.encode('hex'), 'toAddress': toAddress, 'fromAddress': fromAddress, 'subject': subject.encode(
            'base64'), 'message': message.encode('base64'), 'encodingType': encodingtype, 'receivedTime': received, 'read': read}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleGetAllInboxMessageIds(self, params):
    """Return the hex msgid of every message in the inbox folder as a
    JSON string, ordered by received time."""
    rows = sqlQuery(
        '''SELECT msgid FROM inbox where folder='inbox' ORDER BY received''')
    data = '{"inboxMessageIds":['
    for (msgid,) in rows:
        # A comma is needed once at least one id has been appended.
        if len(data) > 25:
            data += ','
        data += json.dumps({'msgid': msgid.encode('hex')}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleGetInboxMessageById(self, params):
    """Fetch a single inbox message by hex msgid as a JSON string.

    An optional second boolean parameter sets the message's read flag
    before the row is returned.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    elif len(params) == 1:
        msgid = self._decode(params[0], "hex")
    elif len(params) >= 2:
        msgid = self._decode(params[0], "hex")
        readStatus = params[1]
        if not isinstance(readStatus, bool):
            raise APIError(23, 'Bool expected in readStatus, saw %s instead.' % type(readStatus))
        queryreturn = sqlQuery('''SELECT read FROM inbox WHERE msgid=?''', msgid)
        # UPDATE is slow, only update if status is different
        if queryreturn != [] and (queryreturn[0][0] == 1) != readStatus:
            sqlExecute('''UPDATE inbox set read = ? WHERE msgid=?''', readStatus, msgid)
            shared.UISignalQueue.put(('changedInboxUnread', None))
    queryreturn = sqlQuery('''SELECT msgid, toaddress, fromaddress, subject, received, message, encodingtype, read FROM inbox WHERE msgid=?''', msgid)
    data = '{"inboxMessage":['
    for row in queryreturn:
        msgid, toAddress, fromAddress, subject, received, message, encodingtype, read = row
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        data += json.dumps({'msgid':msgid.encode('hex'), 'toAddress':toAddress, 'fromAddress':fromAddress, 'subject':subject.encode('base64'), 'message':message.encode('base64'), 'encodingType':encodingtype, 'receivedTime':received, 'read': read}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleGetAllSentMessages(self, params):
    """Return every sent-folder message as a JSON string, ordered by last
    action time.  msgid/ackdata are hex-encoded; subject/message are
    base64-encoded (py2 str codecs).
    """
    queryreturn = sqlQuery('''SELECT msgid, toaddress, fromaddress, subject, lastactiontime, message, encodingtype, status, ackdata FROM sent where folder='sent' ORDER BY lastactiontime''')
    data = '{"sentMessages":['
    for row in queryreturn:
        msgid, toAddress, fromAddress, subject, lastactiontime, message, encodingtype, status, ackdata = row
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        # len > 25 means at least one message has been appended already,
        # so a separating comma is needed.
        if len(data) > 25:
            data += ','
        data += json.dumps({'msgid':msgid.encode('hex'), 'toAddress':toAddress, 'fromAddress':fromAddress, 'subject':subject.encode('base64'), 'message':message.encode('base64'), 'encodingType':encodingtype, 'lastActionTime':lastactiontime, 'status':status, 'ackData':ackdata.encode('hex')}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleGetAllSentMessageIds(self, params):
    """Return the hex msgid of every message in the sent folder as a JSON
    string, ordered by last action time."""
    rows = sqlQuery('''SELECT msgid FROM sent where folder='sent' ORDER BY lastactiontime''')
    data = '{"sentMessageIds":['
    for (msgid,) in rows:
        # A comma is needed once at least one id has been appended.
        if len(data) > 25:
            data += ','
        data += json.dumps({'msgid':msgid.encode('hex')}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
    def HandleInboxMessagesByReceiver(self, params):
        """Return all inbox messages addressed to a given address, as JSON.

        params[0] -- the destination Bitmessage address to filter on.
        Raises APIError(0) when no parameters are supplied.
        """
        if len(params) == 0:
            raise APIError(0, 'I need parameters!')
        toAddress = params[0]
        queryreturn = sqlQuery('''SELECT msgid, toaddress, fromaddress, subject, received, message, encodingtype FROM inbox WHERE folder='inbox' AND toAddress=?''', toAddress)
        data = '{"inboxMessages":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, received, message, encodingtype = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            # Separate successive JSON objects with a comma.
            if len(data) > 25:
                data += ','
            data += json.dumps({'msgid':msgid.encode('hex'), 'toAddress':toAddress, 'fromAddress':fromAddress, 'subject':subject.encode('base64'), 'message':message.encode('base64'), 'encodingType':encodingtype, 'receivedTime':received}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleGetSentMessageById(self, params):
        """Return the sent message with the given hex-encoded msgid as JSON.

        NOTE(review): the loop has no comma handling, so it assumes msgid is
        unique in the sent table (multiple matching rows would produce
        invalid JSON) — confirm against the schema.
        """
        if len(params) == 0:
            raise APIError(0, 'I need parameters!')
        msgid = self._decode(params[0], "hex")
        queryreturn = sqlQuery('''SELECT msgid, toaddress, fromaddress, subject, lastactiontime, message, encodingtype, status, ackdata FROM sent WHERE msgid=?''', msgid)
        data = '{"sentMessage":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, lastactiontime, message, encodingtype, status, ackdata = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            data += json.dumps({'msgid':msgid.encode('hex'), 'toAddress':toAddress, 'fromAddress':fromAddress, 'subject':subject.encode('base64'), 'message':message.encode('base64'), 'encodingType':encodingtype, 'lastActionTime':lastactiontime, 'status':status, 'ackData':ackdata.encode('hex')}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
def HandleGetSentMessagesByAddress(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
fromAddress = params[0]
queryreturn = sqlQuery('''SELECT msgid, toaddress, fromaddress, subject, lastactiontime, message, encodingtype, status, ackdata FROM sent WHERE folder='sent' AND fromAddress=? ORDER BY lastactiontime''',
fromAddress)
data = '{"sentMessages":['
for row in queryreturn:
msgid, toAddress, fromAddress, subject, lastactiontime, message, encodingtype, status, ackdata = row
subject = shared.fixPotentiallyInvalidUTF8Data(subject)
message = shared.fixPotentiallyInvalidUTF8Data(message)
if len(data) > 25:
data += ','
data += json.dumps({'msgid':msgid.encode('hex'), 'toAddress':toAddress, 'fromAddress':fromAddress, 'subject':subject.encode('base64'), 'message':message.encode('base64'), 'encodingType':encodingtype, 'lastActionTime':lastactiontime, 'status':status, 'ackData':ackdata.encode('hex')}, indent=4, separators=(',', ': '))
data += ']}'
return data
    def HandleGetSentMessagesByAckData(self, params):
        """Return the sent message matching the given hex-encoded ackdata.

        NOTE(review): like getSentMessageById, the loop emits no separating
        commas, so it assumes ackdata matches at most one row — confirm.
        """
        if len(params) == 0:
            raise APIError(0, 'I need parameters!')
        ackData = self._decode(params[0], "hex")
        queryreturn = sqlQuery('''SELECT msgid, toaddress, fromaddress, subject, lastactiontime, message, encodingtype, status, ackdata FROM sent WHERE ackdata=?''',
                               ackData)
        data = '{"sentMessage":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, lastactiontime, message, encodingtype, status, ackdata = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            data += json.dumps({'msgid':msgid.encode('hex'), 'toAddress':toAddress, 'fromAddress':fromAddress, 'subject':subject.encode('base64'), 'message':message.encode('base64'), 'encodingType':encodingtype, 'lastActionTime':lastactiontime, 'status':status, 'ackData':ackdata.encode('hex')}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
def HandleTrashMessage(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
msgid = self._decode(params[0], "hex")
# Trash if in inbox table
helper_inbox.trash(msgid)
# Trash if in sent table
sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid)
return 'Trashed message (assuming message existed).'
def HandleTrashInboxMessage(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
msgid = self._decode(params[0], "hex")
helper_inbox.trash(msgid)
return 'Trashed inbox message (assuming message existed).'
def HandleTrashSentMessage(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
msgid = self._decode(params[0], "hex")
sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid)
return 'Trashed sent message (assuming message existed).'
def HandleSendMessage(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
elif len(params) == 4:
toAddress, fromAddress, subject, message = params
encodingType = 2
TTL = 4*24*60*60
elif len(params) == 5:
toAddress, fromAddress, subject, message, encodingType = params
TTL = 4*24*60*60
elif len(params) == 6:
toAddress, fromAddress, subject, message, encodingType, TTL = params
if encodingType != 2:
raise APIError(6, 'The encoding type must be 2 because that is the only one this program currently supports.')
subject = self._decode(subject, "base64")
message = self._decode(message, "base64")
if len(subject + message) > (2 ** 18 - 500):
raise APIError(27, 'Message is too long.')
if TTL < 60*60:
TTL = 60*60
if TTL > 28*24*60*60:
TTL = 28*24*60*60
toAddress = addBMIfNotPresent(toAddress)
fromAddress = addBMIfNotPresent(fromAddress)
status, addressVersionNumber, streamNumber, toRipe = self._verifyAddress(toAddress)
self._verifyAddress(fromAddress)
try:
fromAddressEnabled = shared.config.getboolean(
fromAddress, 'enabled')
except:
raise APIError(13, 'Could not find your fromAddress in the keys.dat file.')
if not fromAddressEnabled:
raise APIError(14, 'Your fromAddress is disabled. Cannot send.')
ackdata = OpenSSL.rand(32)
t = ('',
toAddress,
toRipe,
fromAddress,
subject,
message,
ackdata,
int(time.time()), # sentTime (this won't change)
int(time.time()), # lastActionTime
0,
'msgqueued',
0,
'sent',
2,
TTL)
helper_sent.insert(t)
toLabel = ''
queryreturn = sqlQuery('''select label from addressbook where address=?''', toAddress)
if queryreturn != []:
for row in queryreturn:
toLabel, = row
# apiSignalQueue.put(('displayNewSentMessage',(toAddress,toLabel,fromAddress,subject,message,ackdata)))
shared.UISignalQueue.put(('displayNewSentMessage', (
toAddress, toLabel, fromAddress, subject, message, ackdata)))
shared.workerQueue.put(('sendmessage', toAddress))
return ackdata.encode('hex')
def HandleSendBroadcast(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
if len(params) == 3:
fromAddress, subject, message = params
encodingType = 2
TTL = 4*24*60*60
elif len(params) == 4:
fromAddress, subject, message, encodingType = params
TTL = 4*24*60*60
elif len(params) == 5:
fromAddress, subject, message, encodingType, TTL = params
if encodingType != 2:
raise APIError(6, 'The encoding type must be 2 because that is the only one this program currently supports.')
subject = self._decode(subject, "base64")
message = self._decode(message, "base64")
if len(subject + message) > (2 ** 18 - 500):
raise APIError(27, 'Message is too long.')
if TTL < 60*60:
TTL = 60*60
if TTL > 28*24*60*60:
TTL = 28*24*60*60
fromAddress = addBMIfNotPresent(fromAddress)
self._verifyAddress(fromAddress)
try:
fromAddressEnabled = shared.config.getboolean(
fromAddress, 'enabled')
except:
raise APIError(13, 'could not find your fromAddress in the keys.dat file.')
ackdata = OpenSSL.rand(32)
toAddress = '[Broadcast subscribers]'
ripe = ''
t = ('',
toAddress,
ripe,
fromAddress,
subject,
message,
ackdata,
int(time.time()), # sentTime (this doesn't change)
int(time.time()), # lastActionTime
0,
'broadcastqueued',
0,
'sent',
2,
TTL)
helper_sent.insert(t)
toLabel = '[Broadcast subscribers]'
shared.UISignalQueue.put(('displayNewSentMessage', (
toAddress, toLabel, fromAddress, subject, message, ackdata)))
shared.workerQueue.put(('sendbroadcast', ''))
return ackdata.encode('hex')
def HandleGetStatus(self, params):
if len(params) != 1:
raise APIError(0, 'I need one parameter!')
ackdata, = params
if len(ackdata) != 64:
raise APIError(15, 'The length of ackData should be 32 bytes (encoded in hex thus 64 characters).')
ackdata = self._decode(ackdata, "hex")
queryreturn = sqlQuery(
'''SELECT status FROM sent where ackdata=?''',
ackdata)
if queryreturn == []:
return 'notfound'
for row in queryreturn:
status, = row
return status
def HandleAddSubscription(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
if len(params) == 1:
address, = params
label = ''
if len(params) == 2:
address, label = params
label = self._decode(label, "base64")
try:
unicode(label, 'utf-8')
except:
raise APIError(17, 'Label is not valid UTF-8 data.')
if len(params) > 2:
raise APIError(0, 'I need either 1 or 2 parameters!')
address = addBMIfNotPresent(address)
self._verifyAddress(address)
# First we must check to see if the address is already in the
# subscriptions list.
queryreturn = sqlQuery('''select * from subscriptions where address=?''', address)
if queryreturn != []:
raise APIError(16, 'You are already subscribed to that address.')
sqlExecute('''INSERT INTO subscriptions VALUES (?,?,?)''',label, address, True)
shared.reloadBroadcastSendersForWhichImWatching()
shared.UISignalQueue.put(('rerenderInboxFromLabels', ''))
shared.UISignalQueue.put(('rerenderSubscriptions', ''))
return 'Added subscription.'
def HandleDeleteSubscription(self, params):
if len(params) != 1:
raise APIError(0, 'I need 1 parameter!')
address, = params
address = addBMIfNotPresent(address)
sqlExecute('''DELETE FROM subscriptions WHERE address=?''', address)
shared.reloadBroadcastSendersForWhichImWatching()
shared.UISignalQueue.put(('rerenderInboxFromLabels', ''))
shared.UISignalQueue.put(('rerenderSubscriptions', ''))
return 'Deleted subscription if it existed.'
def ListSubscriptions(self, params):
queryreturn = sqlQuery('''SELECT label, address, enabled FROM subscriptions''')
data = '{"subscriptions":['
for row in queryreturn:
label, address, enabled = row
label = shared.fixPotentiallyInvalidUTF8Data(label)
if len(data) > 20:
data += ','
data += json.dumps({'label':label.encode('base64'), 'address': address, 'enabled': enabled == 1}, indent=4, separators=(',',': '))
data += ']}'
return data
    def HandleDisseminatePreEncryptedMsg(self, params):
        """Do the proof of work for an already-encrypted msg object and
        disseminate it to the Bitmessage network.

        NOTE(review): unlike most handlers this returns None, so the RPC
        caller receives no confirmation string.
        """
        # The device issuing this command to PyBitmessage supplies a msg object that has
        # already been encrypted but which still needs the POW to be done. PyBitmessage
        # accepts this msg object and sends it out to the rest of the Bitmessage network
        # as if it had generated the message itself. Please do not yet add this to the
        # api doc.
        if len(params) != 3:
            raise APIError(0, 'I need 3 parameter!')
        encryptedPayload, requiredAverageProofOfWorkNonceTrialsPerByte, requiredPayloadLengthExtraBytes = params
        encryptedPayload = self._decode(encryptedPayload, "hex")
        # Let us do the POW and attach it to the front
        target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte)
        with shared.printLock:
            print '(For msg message via API) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes
        powStartTime = time.time()
        initialHash = hashlib.sha512(encryptedPayload).digest()
        trialValue, nonce = proofofwork.run(target, initialHash)
        with shared.printLock:
            print '(For msg message via API) Found proof of work', trialValue, 'Nonce:', nonce
            try:
                print 'POW took', int(time.time() - powStartTime), 'seconds.', nonce / (time.time() - powStartTime), 'nonce trials per second.'
            except:
                pass
        # Prepend the 8-byte nonce, then advertise the object to peers.
        encryptedPayload = pack('>Q', nonce) + encryptedPayload
        toStreamNumber = decodeVarint(encryptedPayload[16:26])[0]
        inventoryHash = calculateInventoryHash(encryptedPayload)
        objectType = 2
        TTL = 2.5 * 24 * 60 * 60
        shared.inventory[inventoryHash] = (
            objectType, toStreamNumber, encryptedPayload, int(time.time()) + TTL,'')
        shared.inventorySets[toStreamNumber].add(inventoryHash)
        with shared.printLock:
            print 'Broadcasting inv for msg(API disseminatePreEncryptedMsg command):', inventoryHash.encode('hex')
        shared.broadcastToSendDataQueues((
            toStreamNumber, 'advertiseobject', inventoryHash))
def HandleTrashSentMessageByAckDAta(self, params):
# This API method should only be used when msgid is not available
if len(params) == 0:
raise APIError(0, 'I need parameters!')
ackdata = self._decode(params[0], "hex")
sqlExecute('''UPDATE sent SET folder='trash' WHERE ackdata=?''', ackdata)
return 'Trashed sent message (assuming message existed).'
def HandleDissimatePubKey(self, params):
# The device issuing this command to PyBitmessage supplies a pubkey object to be
# disseminated to the rest of the Bitmessage network. PyBitmessage accepts this
# pubkey object and sends it out to the rest of the Bitmessage network as if it
# had generated the pubkey object itself. Please do not yet add this to the api
# doc.
if len(params) != 1:
raise APIError(0, 'I need 1 parameter!')
payload, = params
payload = self._decode(payload, "hex")
# Let us do the POW
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
print '(For pubkey message via API) Doing proof of work...'
initialHash = hashlib.sha512(payload).digest()
trialValue, nonce = proofofwork.run(target, initialHash)
print '(For pubkey message via API) Found proof of work', trialValue, 'Nonce:', nonce
payload = pack('>Q', nonce) + payload
pubkeyReadPosition = 8 # bypass the nonce
if payload[pubkeyReadPosition:pubkeyReadPosition+4] == '\x00\x00\x00\x00': # if this pubkey uses 8 byte time
pubkeyReadPosition += 8
else:
pubkeyReadPosition += 4
addressVersion, addressVersionLength = decodeVarint(payload[pubkeyReadPosition:pubkeyReadPosition+10])
pubkeyReadPosition += addressVersionLength
pubkeyStreamNumber = decodeVarint(payload[pubkeyReadPosition:pubkeyReadPosition+10])[0]
inventoryHash = calculateInventoryHash(payload)
objectType = 1
#todo: support v4 pubkeys
TTL = 28 * 24 * 60 * 60
shared.inventory[inventoryHash] = (
objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL,'')
shared.inventorySets[pubkeyStreamNumber].add(inventoryHash)
with shared.printLock:
print 'broadcasting inv within API command disseminatePubkey with hash:', inventoryHash.encode('hex')
shared.broadcastToSendDataQueues((
streamNumber, 'advertiseobject', inventoryHash))
    def HandleGetMessageDataByDestinationHash(self, params):
        """Return raw payloads of inventory msg objects whose tag matches.

        Side effect: back-fills the blank 'tag' column for all existing
        msg objects (objecttype 2) before querying it.
        """
        # Method will eventually be used by a particular Android app to
        # select relevant messages. Do not yet add this to the api
        # doc.
        if len(params) != 1:
            raise APIError(0, 'I need 1 parameter!')
        requestedHash, = params
        if len(requestedHash) != 32:
            raise APIError(19, 'The length of hash should be 32 bytes (encoded in hex thus 64 characters).')
        requestedHash = self._decode(requestedHash, "hex")
        # This is not a particularly commonly used API function. Before we
        # use it we'll need to fill out a field in our inventory database
        # which is blank by default (first20bytesofencryptedmessage).
        queryreturn = sqlQuery(
            '''SELECT hash, payload FROM inventory WHERE tag = '' and objecttype = 2 ; ''')
        with SqlBulkExecute() as sql:
            for row in queryreturn:
                hash01, payload = row
                readPosition = 16  # Nonce length + time length
                readPosition += decodeVarint(payload[readPosition:readPosition+10])[1] # Stream Number length
                t = (payload[readPosition:readPosition+32],hash01)
                sql.execute('''UPDATE inventory SET tag=? WHERE hash=?; ''', *t)
        queryreturn = sqlQuery('''SELECT payload FROM inventory WHERE tag = ?''',
                               requestedHash)
        data = '{"receivedMessageDatas":['
        for row in queryreturn:
            payload, = row
            # Separate successive JSON objects with a comma.
            if len(data) > 25:
                data += ','
            data += json.dumps({'data':payload.encode('hex')}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
def HandleClientStatus(self, params):
if len(shared.connectedHostsList) == 0:
networkStatus = 'notConnected'
elif len(shared.connectedHostsList) > 0 and not shared.clientHasReceivedIncomingConnections:
networkStatus = 'connectedButHaveNotReceivedIncomingConnections'
else:
networkStatus = 'connectedAndReceivingIncomingConnections'
return json.dumps({'networkConnections':len(shared.connectedHostsList),'numberOfMessagesProcessed':shared.numberOfMessagesProcessed, 'numberOfBroadcastsProcessed':shared.numberOfBroadcastsProcessed, 'numberOfPubkeysProcessed':shared.numberOfPubkeysProcessed, 'networkStatus':networkStatus, 'softwareName':'PyBitmessage','softwareVersion':shared.softwareVersion}, indent=4, separators=(',', ': '))
def HandleDecodeAddress(self, params):
# Return a meaningful decoding of an address.
if len(params) != 1:
raise APIError(0, 'I need 1 parameter!')
address, = params
status, addressVersion, streamNumber, ripe = decodeAddress(address)
return json.dumps({'status':status, 'addressVersion':addressVersion,
'streamNumber':streamNumber, 'ripe':ripe.encode('base64')}, indent=4,
separators=(',', ': '))
def HandleHelloWorld(self, params):
(a, b) = params
return a + '-' + b
def HandleAdd(self, params):
(a, b) = params
return a + b
    def HandleStatusBar(self, params):
        """Show the supplied message in the GUI status bar (no return value)."""
        message, = params
        shared.UISignalQueue.put(('updateStatusBar', message))
handlers = {}
handlers['helloWorld'] = HandleHelloWorld
handlers['add'] = HandleAdd
handlers['statusBar'] = HandleStatusBar
handlers['listAddresses'] = HandleListAddresses
handlers['listAddressBookEntries'] = HandleListAddressBookEntries;
handlers['listAddressbook'] = HandleListAddressBookEntries # the listAddressbook alias should be removed eventually.
handlers['addAddressBookEntry'] = HandleAddAddressBookEntry
handlers['addAddressbook'] = HandleAddAddressBookEntry # the addAddressbook alias should be deleted eventually.
handlers['deleteAddressBookEntry'] = HandleDeleteAddressBookEntry
handlers['deleteAddressbook'] = HandleDeleteAddressBookEntry # The deleteAddressbook alias should be deleted eventually.
handlers['createRandomAddress'] = HandleCreateRandomAddress
handlers['createDeterministicAddresses'] = HandleCreateDeterministicAddresses
handlers['getDeterministicAddress'] = HandleGetDeterministicAddress
handlers['createChan'] = HandleCreateChan
handlers['joinChan'] = HandleJoinChan
handlers['leaveChan'] = HandleLeaveChan
handlers['deleteAddress'] = HandleDeleteAddress
handlers['getAllInboxMessages'] = HandleGetAllInboxMessages
handlers['getAllInboxMessageIds'] = HandleGetAllInboxMessageIds
handlers['getAllInboxMessageIDs'] = HandleGetAllInboxMessageIds
handlers['getInboxMessageById'] = HandleGetInboxMessageById
handlers['getInboxMessageByID'] = HandleGetInboxMessageById
handlers['getAllSentMessages'] = HandleGetAllSentMessages
handlers['getAllSentMessageIds'] = HandleGetAllSentMessageIds
handlers['getAllSentMessageIDs'] = HandleGetAllSentMessageIds
handlers['getInboxMessagesByReceiver'] = HandleInboxMessagesByReceiver
handlers['getInboxMessagesByAddress'] = HandleInboxMessagesByReceiver #after some time getInboxMessagesByAddress should be removed
handlers['getSentMessageById'] = HandleGetSentMessageById
handlers['getSentMessageByID'] = HandleGetSentMessageById
handlers['getSentMessagesByAddress'] = HandleGetSentMessagesByAddress
handlers['getSentMessagesBySender'] = HandleGetSentMessagesByAddress
handlers['getSentMessageByAckData'] = HandleGetSentMessagesByAckData
handlers['trashMessage'] = HandleTrashMessage
handlers['trashInboxMessage'] = HandleTrashInboxMessage
handlers['trashSentMessage'] = HandleTrashSentMessage
handlers['trashSentMessageByAckData'] = HandleTrashSentMessageByAckDAta
handlers['sendMessage'] = HandleSendMessage
handlers['sendBroadcast'] = HandleSendBroadcast
handlers['getStatus'] = HandleGetStatus
handlers['addSubscription'] = HandleAddSubscription
handlers['deleteSubscription'] = HandleDeleteSubscription
handlers['listSubscriptions'] = ListSubscriptions
handlers['disseminatePreEncryptedMsg'] = HandleDisseminatePreEncryptedMsg
handlers['disseminatePubkey'] = HandleDissimatePubKey
handlers['getMessageDataByDestinationHash'] = HandleGetMessageDataByDestinationHash
handlers['getMessageDataByDestinationTag'] = HandleGetMessageDataByDestinationHash
handlers['clientStatus'] = HandleClientStatus
handlers['decodeAddress'] = HandleDecodeAddress
def _handle_request(self, method, params):
if (self.handlers.has_key(method)):
return self.handlers[method](self ,params)
else:
raise APIError(20, 'Invalid method: %s' % method)
    def _dispatch(self, method, params):
        """RPC entry point: authenticate the client, then route the call.

        Errors are returned as plain strings rather than raised —
        presumably so the XML-RPC layer does not wrap them as faults
        (NOTE(review): confirm against the server setup, which is outside
        this view).
        """
        self.cookies = []
        validuser = self.APIAuthenticateClient()
        if not validuser:
            # Throttle brute-force authentication attempts.
            time.sleep(2)
            return "RPC Username or password incorrect or HTTP header lacks authentication at all."
        try:
            return self._handle_request(method, params)
        except APIError as e:
            return str(e)
        except varintDecodeError as e:
            logger.error(e)
            return "API Error 0026: Data contains a malformed varint. Some details: %s" % e
        except Exception as e:
            logger.exception(e)
            return "API Error 0021: Unexpected API Failure - %s" % str(e)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseReshapeTest(test.TestCase):
  """Tests that sparse_reshape mirrors dense reshape semantics."""

  def _SparseTensorPlaceholder(self):
    """Build a feedable SparseTensor of (int64, float64, int64) placeholders."""
    return sparse_tensor.SparseTensor(
        array_ops.placeholder(dtypes.int64),
        array_ops.placeholder(dtypes.float64),
        array_ops.placeholder(dtypes.int64))

  def _SparseTensorValue_5x6(self):
    """A rank-2 (5x6) SparseTensorValue with 6 nonzero entries."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
                    [3, 3]]).astype(np.int64)
    val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
    shape = np.array([5, 6]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def _SparseTensorValue_2x3x4(self):
    """A rank-3 (2x3x4) SparseTensorValue with 7 nonzero entries."""
    ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
                    [1, 1, 3], [1, 2, 2]])
    val = np.array([1, 10, 12, 103, 111, 113, 122])
    shape = np.array([2, 3, 4])
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def testSameShape(self):
    """Reshaping to the identical shape is a no-op."""
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])

      output_val = sess.run(sp_output)
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testFeedSameShape(self):
    """Same-shape reshape is a no-op when the input is fed at run time."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testFeedSameShapeWithInferredDim(self):
    """A -1 dimension that resolves to the original shape is a no-op."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testFeedNewShapeSameRank(self):
    """Reshape 5x6 -> 3x10 remaps indices while preserving values."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
                                    [2, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [3, 10])

  def testFeedNewShapeSameRankWithInferredDim(self):
    """Reshape 5x6 -> 3x-1 infers the 10 and remaps indices."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
                                    [2, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [3, 10])

  def testUpRank(self):
    """Reshape rank-2 (5x6) to rank-3 (2x3x5)."""
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])

      output_val = sess.run(sp_output)
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
                                    [1, 1, 0], [1, 1, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [2, 3, 5])

  def testFeedUpRank(self):
    """Rank-increasing reshape with a fed input."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
                                    [1, 1, 0], [1, 1, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [2, 3, 5])

  def testFeedUpRankWithInferredDim(self):
    """Rank-increasing reshape with one inferred (-1) dimension."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
                                    [1, 1, 0], [1, 1, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [2, 3, 5])

  def testFeedDownRank(self):
    """Reshape rank-3 (2x3x4) to rank-2 (6x4)."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_2x3x4()
      sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
                                    [4, 3], [5, 2]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [6, 4])

  def testFeedDownRankWithInferredDim(self):
    """Rank-decreasing reshape with one inferred (-1) dimension."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_2x3x4()
      sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
                                    [4, 3], [5, 2]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [6, 4])

  def testFeedMultipleInferredDims(self):
    """More than one -1 in the target shape is rejected at run time."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
      with self.assertRaisesOpError("only one output shape size may be -1"):
        sess.run(sp_output, {sp_input: input_val})

  def testFeedMismatchedSizes(self):
    """A target shape with a different element count is rejected."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
      with self.assertRaisesOpError(
          "Input to reshape is a tensor with 30 dense values"):
        sess.run(sp_output, {sp_input: input_val})

  def testFeedMismatchedSizesWithInferredDim(self):
    """An inferred dim that cannot divide the element count is rejected."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
      with self.assertRaisesOpError("requested shape requires a multiple"):
        sess.run(sp_output, {sp_input: input_val})

  def testFeedPartialShapes(self):
    """Static shape inference propagates known ranks and row counts."""
    with self.test_session(use_gpu=False):
      # Incorporate new rank into shape information if known
      sp_input = self._SparseTensorPlaceholder()
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
      self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
      self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])

      # Incorporate known shape information about input indices in output
      # indices
      sp_input = self._SparseTensorPlaceholder()
      sp_input.indices.set_shape([5, None])
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
      self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
      self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])

      # Even if new_shape has no shape information, we know the ranks of
      # output indices and shape
      sp_input = self._SparseTensorPlaceholder()
      sp_input.indices.set_shape([5, None])
      new_shape = array_ops.placeholder(dtypes.int64)
      sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
      self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
      self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])

  def testFeedDenseReshapeSemantics(self):
    """Randomized check that sparse and dense reshape agree."""
    with self.test_session(use_gpu=False) as sess:
      # Compute a random rank-5 initial shape and new shape, randomly sparsify
      # it, and check that the output of SparseReshape has the same semantics
      # as a dense reshape.
      factors = np.array([2] * 4 + [3] * 4 + [5] * 4)  # 810k total elements
      orig_rank = np.random.randint(2, 7)
      orig_map = np.random.randint(orig_rank, size=factors.shape)
      orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]
      new_rank = np.random.randint(2, 7)
      new_map = np.random.randint(new_rank, size=factors.shape)
      new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]

      orig_dense = np.random.uniform(size=orig_shape)
      orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
      orig_values = orig_dense[orig_dense < 0.5]

      new_dense = np.reshape(orig_dense, new_shape)
      new_indices = np.transpose(np.nonzero(new_dense < 0.5))
      new_values = new_dense[new_dense < 0.5]

      sp_input = self._SparseTensorPlaceholder()
      input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
                                                  orig_shape)
      sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, new_indices)
      self.assertAllEqual(output_val.values, new_values)
      self.assertAllEqual(output_val.dense_shape, new_shape)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
"""
Created: 11 November 2016
Last Updated: 11 April 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Base class for performing deep learning
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT directly.
Instead, this is setup to use flat ntuples
that are accessed via uproot.
> UPROOT: https://github.com/scikit-hep/uproot
> KERAS: https://keras.io/
> TENSORFLOW: https://www.tensorflow.org/
> PYTORCH: http://pytorch.org/
> LWTNN: https://github.com/lwtnn/lwtnn
Expandable: Do 'testing' phase later than training phase
Diagnostics post-training phase
Different model (PyTorch)
"""
import json
import util
import datetime
import matplotlib
matplotlib.use('PDF') # png not supported at LPC; do this before anything else tries to set the backend
import uproot
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential,model_from_json,load_model
from keras.layers import Dense, Activation
from keras.callbacks import EarlyStopping
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split,StratifiedKFold
from sklearn.metrics import roc_curve, auc
from deepLearningPlotter import DeepLearningPlotter
# Fix the NumPy random seed for reproducibility.  The module-level 'seed'
# is also reused below as the StratifiedKFold shuffling seed.
seed = 2018
np.random.seed(seed)
class DeepLearning(object):
    """Deep Learning base class.

    Configuration (features, architecture, hyper-parameters) is set as plain
    attributes -- typically from a config file -- after construction.
    Call initialize() once the attributes are set, then runTraining() or
    runInference().
    """

    def __init__(self):
        self.date = datetime.date.today().strftime('%d%b%Y')

        ## Handling NN objects and data -- set in the class
        self.df = None                      # dataframe containing physics information
        self.fpr = None                     # ROC curve: false positive rate
        self.tpr = None                     # ROC curve: true positive rate
        self.model = None                   # Keras model
        self.accuracy = {'mean':0,'std':0}  # k-fold accuracies
        self.histories = []                 # model history (for each k-fold)
        self.train_data = {}                # set later
        self.test_data = {}                 # set later
        self.train_predictions = []         # set later
        self.test_predictions = []          # set later

        ## NN architecture & parameters -- set by config file
        self.treename = 'features'      # Name of TTree to access in ROOT file (via uproot)
        self.useLWTNN = True            # export (& load model from) files for LWTNN
        self.dnn_name = "dnn"           # name to access in lwtnn ('variables.json')
        self.hep_data = ""              # Name for loading features (physics data) -- assumes all data in one file
        self.model_name = ""            # Name for saving/loading model
        self.output_dir = 'data/dnn/'   # directory for storing NN data
        self.dnn_method = None          # DNN method applied: classification/regression: ['binary','multi','regression']
        self.runDiagnostics = True      # Make plots pre/post training
        self.verbose_level = 'INFO'
        self.verbose = False
        self.target_names = ["top","antitop"]
        self.target_values = [0,1]
        self.loss = 'binary_crossentropy'   # preferred for binary classification
        self.init = 'normal'
        self.nNodes = []
        self.dropout = None
        self.metrics = ['accuracy']
        self.features = []
        self.epochs = 1
        self.optimizer = 'adam'
        self.input_dim = 1      # len(self.features)
        self.output_dim = 1     # number of output dimensions (# of categories/# of predictions for regression)
        self.batch_size = 32
        self.activations = ['elu']  # https://keras.io/activations/
        self.kfold_splits = 2
        self.nHiddenLayers = 1
        self.earlystopping = {}     # e.g. {'monitor':'loss','min_delta':0.0001,'patience':5,'mode':'auto'}

    def initialize(self):
        """Initialize a few parameters after they've been set by the user."""
        self.msg_svc = util.VERBOSE()
        self.msg_svc.level = self.verbose_level
        self.msg_svc.initialize()
        self.verbose = not self.msg_svc.compare(self.verbose_level,"WARNING")  # verbose if level is <"WARNING"

        # Set name for the model, if needed
        if not self.model_name:
            self.model_name = self.hep_data.split('/')[-1].split('.')[0]+'_'+self.date

        # initialize empty dictionaries, lists
        self.test_data = {'X':[],'Y':[]}
        self.train_data = {'X':[],'Y':[]}
        self.test_predictions = []
        self.train_predictions = []
        self.fpr = []  # false positive rate
        self.tpr = []  # true positive rate
        self.histories = []

        ## -- Plotting framework
        # NOTE(fix): this used a Python2-only 'print' statement; route the
        # message through the message service so the module runs on Python 3.
        self.msg_svc.INFO("DL :  >> Store output in {0}".format(self.output_dir))
        self.plotter = DeepLearningPlotter()  # class for plotting relevant NN information
        self.plotter.output_dir = self.output_dir
        self.plotter.image_format = 'png'
        if self.dnn_method != 'regression':
            self.plotter.classification = self.dnn_method
            self.plotter.regression = False
        else:
            self.plotter.classification = False
            self.plotter.regression = True

        ## -- Adjust model architecture parameters (flexibilty in config file)
        if len(self.nNodes) == 1 and self.nHiddenLayers > 0:
            # All layers (initial & hidden) have the same number of nodes
            self.msg_svc.DEBUG("DL : Setting all layers ({0}) to have the same number of nodes ({1})".format(self.nHiddenLayers+1,self.nNodes))
            nodes_per_layer = self.nNodes[0]
            self.nNodes = [nodes_per_layer for _ in range(self.nHiddenLayers+1)]  # 1st layer + nHiddenLayers

        ## -- Adjust activation function parameter (flexibilty in config file)
        if len(self.activations) == 1:
            # Assume the same activation function for all layers (input,hidden,output)
            self.msg_svc.DEBUG("DL : Setting input, hidden, and output layers ({0}) \n".format(self.nHiddenLayers+2)+\
                               "     to have the same activation function {0}".format(self.activations[0]) )
            activation = self.activations[0]
            self.activations = [activation for _ in range(self.nHiddenLayers+2)]  # 1st layer + nHiddenLayers + output
        elif len(self.activations) == 2 and self.nHiddenLayers > 0:
            # Assume the last activation is for the output and the first+hidden layers have the first activation
            self.msg_svc.DEBUG("DL : Setting input and hidden layers ({0}) to the same activation function, {1},\n".format(self.nHiddenLayers+1,self.activations[0])+\
                               "     and the output activation to {0}".format(self.activations[1]) )
            first_hidden_act = self.activations[0]
            output_act = self.activations[1]
            self.activations = [first_hidden_act for _ in range(self.nHiddenLayers+1)]+[output_act]

        return

    ## Single functions to run all of the necessary pieces
    def runTraining(self, extra_branches=None):
        """Train the NN model end-to-end: load data, build, fit, save.

        @param extra_branches    extra ntuple branches to load (e.g. for plots)
        """
        # NOTE(fix): avoid a mutable default argument
        if extra_branches is None:
            extra_branches = []

        self.load_hep_data(extra_branches)
        self.build_model()

        # hard-coded :/
        self.plotter.initialize(self.df,self.target_names,self.target_values)

        if self.runDiagnostics:
            self.diagnostics(preTraining=True)   # save plots of the features and model architecture

        self.train_model()

        self.msg_svc.INFO(" SAVE MODEL")
        self.save_model(self.useLWTNN)

        if self.runDiagnostics:
            self.diagnostics(postTraining=True)  # save plots of the performance in training/testing

        return

    def runInference(self, data=None):
        """
        Run inference of the NN model

        User responsible for diagnostics if not doing training:
        -> save all predictions (& labels) using 'self.test_predictions'
           then call individual functions:
              plot_features()   -> compare features of the inputs
              plot_prediction() -> compare output prediction (works for classification)
              plot_ROC()        -> signal vs background efficiency (need self.fpr, self.tpr filled)
        """
        self.load_model(self.useLWTNN)
        if data is None:
            # NOTE(fix): narrowed a bare 'except:' so fatal signals
            # (KeyboardInterrupt/SystemExit) are not swallowed.
            try:
                self.load_hep_data()
                data = self.df[self.features]
            except Exception:
                self.msg_svc.ERROR("DL : runInference() cannot proceed because 'data' is None and cannot load HEP data")
                self.msg_svc.ERROR("DL : Please check your implementation.")
                return -999
        prediction = self.predict(data)

        return prediction

    ## Specific functions to perform training/inference tasks
    def build_model(self):
        """Construct the NN model -- only Keras support for now"""
        self.msg_svc.INFO("DL : Build the neural network model")

        ## Declare the model
        self.model = Sequential()  # The Keras Sequential model is a linear stack of layers.

        ## Add 1st layer
        self.model.add( Dense( int(self.nNodes[0]), input_dim=self.input_dim, kernel_initializer=self.init, activation=self.activations[0]) )

        ## Add hidden layer(s)
        for h in range(self.nHiddenLayers):
            self.model.add( Dense( int(self.nNodes[h+1]), kernel_initializer=self.init, activation=self.activations[h+1]) )

        ## Add the output layer
        self.model.add( Dense(self.output_dim, kernel_initializer=self.init, activation=self.activations[-1]) )

        ## Build the model
        self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)

        return

    def train_model(self):
        """Train the model using stratified k-fold cross-validation.

        Stores per-fold train/test splits, fit histories, predictions and
        (for binary classification) ROC curves; records mean/std accuracy.
        """
        self.msg_svc.INFO("DL : Train the model!")

        callbacks_list = []
        if self.earlystopping:
            earlystop = EarlyStopping(**self.earlystopping)
            callbacks_list = [earlystop]

        # Access features and labels from dataframe
        X = self.df[self.features].values
        Y = self.df['target'].values

        # Setup k-fold process (seed is the module-level reproducibility seed)
        kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed)
        cvpredictions = []  # compare outputs from each cross-validation

        # NOTE(fix): the original called .format() on a string with no
        # placeholder; include the number of splits in the message.
        self.msg_svc.INFO("DL : Fitting K-Fold cross validation ({0} splits)".format(self.kfold_splits))
        for ind,(train,test) in enumerate(kfold.split(X,Y)):
            self.msg_svc.DEBUG("DL : - Fitting K-Fold {0}".format(ind))

            # store test/train data from each k-fold to compare later
            self.test_data['X'].append(X[test])
            self.test_data['Y'].append(Y[test])
            self.train_data['X'].append(X[train])
            self.train_data['Y'].append(Y[train])

            # Fit the model to training data & save the history
            Y_train = Y[train]
            Y_test = Y[test]

            # Convert integer labels to one-hot rows for multi-class output.
            # NOTE(fix): parentheses added -- 'and' binds tighter than 'or',
            # so 'multi' previously bypassed the array check entirely.
            # NOTE(review): np.array_equal() compares an array with a 2-tuple
            # and looks like it was meant to be a shape check; kept as-is
            # pending confirmation.
            if (self.dnn_method == 'multi' or self.dnn_method == 'regression') and \
               not np.array_equal(Y_train,(Y_train[0],self.output_dim)):
                train_total_array = []
                test_total_array = []
                for a in range(self.output_dim):
                    # NOTE(fix): the original indexed with the scalar
                    # 'Y[train][0]==a' (a single bool); compare the full
                    # label arrays element-wise instead.
                    dummy_train = np.zeros(Y_train.shape[0])
                    dummy_train[Y_train == a] = 1
                    train_total_array.append( dummy_train.tolist() )

                    dummy_test = np.zeros(Y_test.shape[0])
                    dummy_test[Y_test == a] = 1
                    test_total_array.append( dummy_test.tolist() )
                Y_train = np.array(train_total_array).T
                Y_test = np.array(test_total_array).T

            history = self.model.fit(X[train],Y_train,epochs=self.epochs,\
                                     callbacks=callbacks_list,batch_size=self.batch_size,verbose=self.verbose)
            self.histories.append(history)

            # evaluate the model
            self.msg_svc.DEBUG("DL : + Evaluate the model: ")
            predictions = self.model.evaluate(X[test], Y_test,verbose=self.verbose,batch_size=self.batch_size)
            cvpredictions.append(predictions[1] * 100)
            self.msg_svc.DEBUG("DL : {0}: {1:.2f}%".format(self.model.metrics_names[1], predictions[1]*100))

            # Evaluate training sample
            train_predictions = self.predict(X[train])
            self.train_predictions.append( train_predictions )

            # Evaluate test sample
            test_predictions = self.predict(X[test])
            self.test_predictions.append( test_predictions )

            # Make ROC curve from test sample
            if self.dnn_method == 'binary':
                fpr,tpr,_ = roc_curve( Y[test], test_predictions )
                self.fpr.append(fpr)
                self.tpr.append(tpr)

        self.msg_svc.INFO("DL : Finished K-Fold cross-validation: ")
        self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)}
        self.msg_svc.INFO("DL : - Accuracy: {0:.2f}% (+/- {1:.2f}%)".format(np.mean(cvpredictions), np.std(cvpredictions)))

        return

    def predict(self, data=None):
        """Return the prediction from a test sample"""
        self.msg_svc.DEBUG("DL : Get the DNN prediction")
        if data is None:
            self.msg_svc.ERROR("DL : predict() given NoneType data. Returning -999.")
            self.msg_svc.ERROR("DL : Please check your configuration!")
            return -999.
        return self.model.predict( data )

    def load_hep_data(self, variables2plot=None):
        """
        Load the physics data (flat ntuple) for NN using uproot
        Convert to DataFrame for easier slicing

        @param variables2plot    If there are extra variables to plot,
                                 that aren't features of the NN, include them here
        """
        # NOTE(fix): avoid a mutable default argument
        if variables2plot is None:
            variables2plot = []

        # NOTE(fix): renamed local 'file' (shadowed the builtin).
        hep_file = uproot.open(self.hep_data)
        data = hep_file[self.treename]
        dataframe = data.pandas.df( self.features+['target']+variables2plot )

        self.metadata = hep_file['metadata']  # names of samples, target values, etc.

        applySelection = False
        if applySelection:
            # special treatment (add some selection after making the ntuples)
            # this is hard-coded because it is user specific for now,
            # may build in way to do this more easily
            # NOTE(fix): read from 'dataframe' -- self.df is not assigned yet
            # at this point (the original referenced self.df here).
            BEST_top = dataframe['ljet_BEST_t']
            BEST_jet = dataframe['ljet_BEST_j']
            mask = BEST_top / (BEST_top+BEST_jet) > 0.8
            dataframe = dataframe[mask]

        self.df = dataframe
        return

    def load_model(self, from_lwtnn=False):
        """Load existing model to make plots or predictions"""
        self.model = None

        if from_lwtnn:
            # NOTE(fix): read the architecture via a context manager
            # (the original left the file handle open).
            with open(self.model_name+"_model.json",'r') as json_file:
                model_json = json_file.read()
            self.model = model_from_json(model_json)
            self.model.load_weights(self.model_name+"_weights.h5")
            self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
        else:
            self.model = load_model('{0}.h5'.format(self.model_name))

        return

    def save_model(self, to_lwtnn=False):
        """Save the model for use later"""
        output = self.output_dir+'/'+self.model_name

        if to_lwtnn:
            ## Save to format for LWTNN
            self.save_features()  ## Save variables to JSON file

            ## model architecture
            model_json = self.model.to_json()
            with open(output+'_model.json', 'w') as outfile:
                outfile.write(model_json)

            ## save the model weights
            self.model.save_weights(output+'_weights.h5')
        else:
            self.model.save('{0}.h5'.format(output))  # creates a HDF5 file of model

        return

    def save_features(self):
        """
        Save the features to a json file to load via lwtnn later
        Hard-coded scale & offset; must change later if necessary
        """
        text = """  {
    "inputs": ["""

        for fe,feature in enumerate(self.features):
            # NOTE(fix): the original compared against len(self.features),
            # which enumerate never reaches, so every entry -- including the
            # last -- got a trailing comma (invalid JSON).
            comma = "," if fe != len(self.features)-1 else ""
            tmp = """
      {"name": "%(feature)s",
       "scale": 1,
       "offset": 0}%(comma)s""" % {'feature':feature,'comma':comma}
            text += tmp
        text += "],"
        text += """
    "class_labels": ["%(name)s"],
    "keras_version": "%(version)s",
    "miscellaneous": {}
  }
""" % {'version':keras.__version__,'name':self.dnn_name}

        varsFileName = self.output_dir+'/variables.json'
        # NOTE(fix): close the file deterministically (was left open).
        with open(varsFileName,'w') as varsFile:
            varsFile.write(text)

        return

    def diagnostics(self, preTraining=False, postTraining=False):
        """Diagnostic tests of the NN"""
        self.msg_svc.INFO("DL : Diagnostics")

        # Diagnostics before the training
        if preTraining:
            self.msg_svc.INFO("DL : -- pre-training")
            self.plotter.features()                         # compare features for different targets
            self.plotter.feature_correlations()             # correlations of features
            self.plotter.model(self.model,self.model_name)  # Keras plot of the model architecture

        # post training/testing
        if postTraining:
            self.msg_svc.INFO("DL : -- post-training")

            self.msg_svc.INFO("DL : -- post-training :: PREDICTIONS ")
            train = {'X':self.train_predictions,'Y':self.train_data['Y']}
            test = {'X':self.test_predictions,'Y':self.test_data['Y']}
            self.plotter.prediction(train,test)  # compare DNN prediction for different targets

            self.msg_svc.INFO("DL : -- post-training :: ROC")
            self.plotter.ROC(self.fpr,self.tpr,self.accuracy)  # ROC curve for signal vs background

            self.msg_svc.INFO("DL : -- post-training :: History")
            self.plotter.loss_history(self.histories)  # loss as a function of epoch

        return

## THE END ##
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from mox3 import mox
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance
from nova.objects import instance_info_cache
from nova.objects import pci_device
from nova.objects import security_group
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_instance_device_metadata
from nova.tests.unit.objects import test_instance_fault
from nova.tests.unit.objects import test_instance_info_cache
from nova.tests.unit.objects import test_instance_numa_topology
from nova.tests.unit.objects import test_instance_pci_requests
from nova.tests.unit.objects import test_migration_context as test_mig_ctxt
from nova.tests.unit.objects import test_objects
from nova.tests.unit.objects import test_security_group
from nova.tests.unit.objects import test_vcpu_model
from nova.tests import uuidsentinel as uuids
from nova import utils
class _TestInstanceObject(object):
    @property
    def fake_instance(self):
        """Build a fake DB-layer instance dict used as a fixture by the tests.

        Overrides uuid, cell name, timestamps and ownership so the dict is
        consistent with self.context and deserializes cleanly.
        """
        db_inst = fake_instance.fake_db_instance(id=2,
                                                 access_ip_v4='1.2.3.4',
                                                 access_ip_v6='::1')
        db_inst['uuid'] = uuids.db_instance
        db_inst['cell_name'] = 'api!child'
        # Timestamps are cleared except launched_at, which is fixed so tests
        # can assert an exact datetime round-trip.
        db_inst['terminated_at'] = None
        db_inst['deleted_at'] = None
        db_inst['created_at'] = None
        db_inst['updated_at'] = None
        db_inst['launched_at'] = datetime.datetime(1955, 11, 12,
                                                   22, 4, 0)
        db_inst['deleted'] = False
        db_inst['security_groups'] = []
        db_inst['pci_devices'] = []
        db_inst['user_id'] = self.context.user_id
        db_inst['project_id'] = self.context.project_id
        db_inst['tags'] = []
        db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
                                     instance_uuid=db_inst['uuid'])
        db_inst['system_metadata'] = {
            'image_name': 'os2-warp',
            'image_min_ram': 100,
            'image_hw_disk_bus': 'ide',
            'image_hw_vif_model': 'ne2k_pci',
        }
        return db_inst
    def test_datetime_deserialization(self):
        # Round-trip a datetime field through obj_to_primitive /
        # obj_from_primitive and verify it survives as a datetime object
        # with the ISO-8601 'Z' serialization in the primitive form.
        red_letter_date = timeutils.parse_isotime(
            utils.isotime(datetime.datetime(1955, 11, 5)))
        inst = objects.Instance(uuid=uuids.instance,
                                launched_at=red_letter_date)
        primitive = inst.obj_to_primitive()
        expected = {'nova_object.name': 'Instance',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': inst.VERSION,
                    'nova_object.data':
                        {'uuid': uuids.instance,
                         'launched_at': '1955-11-05T00:00:00Z'},
                    'nova_object.changes': ['launched_at', 'uuid']}
        self.assertJsonEqual(primitive, expected)
        inst2 = objects.Instance.obj_from_primitive(primitive)
        self.assertIsInstance(inst2.launched_at, datetime.datetime)
        self.assertEqual(red_letter_date, inst2.launched_at)
    def test_ip_deserialization(self):
        # IP address fields serialize to plain strings and deserialize back
        # to netaddr.IPAddress objects.
        inst = objects.Instance(uuid=uuids.instance, access_ip_v4='1.2.3.4',
                                access_ip_v6='::1')
        primitive = inst.obj_to_primitive()
        expected = {'nova_object.name': 'Instance',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': inst.VERSION,
                    'nova_object.data':
                        {'uuid': uuids.instance,
                         'access_ip_v4': '1.2.3.4',
                         'access_ip_v6': '::1'},
                    'nova_object.changes': ['uuid', 'access_ip_v6',
                                            'access_ip_v4']}
        self.assertJsonEqual(primitive, expected)
        inst2 = objects.Instance.obj_from_primitive(primitive)
        self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
        self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
        self.assertEqual(netaddr.IPAddress('1.2.3.4'), inst2.access_ip_v4)
        self.assertEqual(netaddr.IPAddress('::1'), inst2.access_ip_v6)
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_get_without_expected(self, mock_get):
        # With expected_attrs=[] no optional attribute may be loaded and the
        # DB query must join no extra columns.
        mock_get.return_value = self.fake_instance
        inst = objects.Instance.get_by_uuid(self.context, 'uuid',
                                            expected_attrs=[])
        for attr in instance.INSTANCE_OPTIONAL_ATTRS:
            self.assertFalse(inst.obj_attr_is_set(attr))
        mock_get.assert_called_once_with(self.context, 'uuid',
                                         columns_to_join=[])
    @mock.patch.object(db, 'instance_extra_get_by_instance_uuid')
    @mock.patch.object(db, 'instance_fault_get_by_instance_uuids')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_get_with_expected(self, mock_get, mock_fault_get, mock_extra_get):
        # Request every optional attribute and verify they all get populated
        # from a single instance_get_by_uuid call (faults come from their own
        # query; the instance_extra API must not be hit separately).
        exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
        # These attributes are not plain columns: they either live in the
        # 'extra' table (re-added as 'extra.*' joins below) or are loaded by
        # a dedicated query ('fault').
        exp_cols.remove('fault')
        exp_cols.remove('numa_topology')
        exp_cols.remove('pci_requests')
        exp_cols.remove('vcpu_model')
        exp_cols.remove('ec2_ids')
        exp_cols.remove('migration_context')
        exp_cols.remove('keypairs')
        exp_cols.remove('device_metadata')
        exp_cols = [exp_col for exp_col in exp_cols if 'flavor' not in exp_col]
        exp_cols.extend(['extra', 'extra.numa_topology', 'extra.pci_requests',
                         'extra.flavor', 'extra.vcpu_model',
                         'extra.migration_context', 'extra.keypairs',
                         'extra.device_metadata'])

        # Fake serialized payloads for everything stored in instance_extra.
        fake_topology = (test_instance_numa_topology.
                         fake_db_topology['numa_topology'])
        fake_requests = jsonutils.dumps(test_instance_pci_requests.
                                        fake_pci_requests)
        fake_devices_metadata = \
            test_instance_device_metadata.fake_devices_metadata
        fake_flavor = jsonutils.dumps(
            {'cur': objects.Flavor().obj_to_primitive(),
             'old': None, 'new': None})
        fake_vcpu_model = jsonutils.dumps(
            test_vcpu_model.fake_vcpumodel.obj_to_primitive())
        fake_mig_context = jsonutils.dumps(
            test_mig_ctxt.fake_migration_context_obj.obj_to_primitive())
        fake_keypairlist = objects.KeyPairList(objects=[
            objects.KeyPair(name='foo')])
        fake_keypairs = jsonutils.dumps(
            fake_keypairlist.obj_to_primitive())
        fake_service = {'created_at': None, 'updated_at': None,
                        'deleted_at': None, 'deleted': False, 'id': 123,
                        'host': 'fake-host', 'binary': 'nova-fake',
                        'topic': 'fake-service-topic', 'report_count': 1,
                        'forced_down': False, 'disabled': False,
                        'disabled_reason': None, 'last_seen_up': None,
                        'version': 1, 'uuid': uuids.service,
                        }
        fake_instance = dict(self.fake_instance,
                             services=[fake_service],
                             extra={
                                 'numa_topology': fake_topology,
                                 'pci_requests': fake_requests,
                                 'device_metadata': fake_devices_metadata,
                                 'flavor': fake_flavor,
                                 'vcpu_model': fake_vcpu_model,
                                 'migration_context': fake_mig_context,
                                 'keypairs': fake_keypairs,
                                 })
        mock_get.return_value = fake_instance
        fake_faults = test_instance_fault.fake_faults
        mock_fault_get.return_value = fake_faults

        inst = objects.Instance.get_by_uuid(
            self.context, 'uuid',
            expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
        for attr in instance.INSTANCE_OPTIONAL_ATTRS:
            self.assertTrue(inst.obj_attr_is_set(attr))
        self.assertEqual(123, inst.services[0].id)
        self.assertEqual('foo', inst.keypairs[0].name)
        mock_get.assert_called_once_with(self.context, 'uuid',
                                         columns_to_join=exp_cols)
        mock_fault_get.assert_called_once_with(self.context,
                                               [fake_instance['uuid']])
        self.assertFalse(mock_extra_get.called)
def test_lazy_load_services_on_deleted_instance(self):
# We should avoid trying to hit the database to reload the instance
# and just set the services attribute to an empty list.
instance = objects.Instance(self.context, uuid=uuids.instance,
deleted=True)
self.assertEqual(0, len(instance.services))
def test_lazy_load_tags_on_deleted_instance(self):
# We should avoid trying to hit the database to reload the instance
# and just set the tags attribute to an empty list.
instance = objects.Instance(self.context, uuid=uuids.instance,
deleted=True)
self.assertEqual(0, len(instance.tags))
    def test_lazy_load_tags(self):
        # Create an instance and a tag for it, then check that touching
        # .tags lazily loads the tag list from the database.
        instance = objects.Instance(self.context, uuid=uuids.instance,
                                    user_id=self.context.user_id,
                                    project_id=self.context.project_id)
        instance.create()
        tag = objects.Tag(self.context, resource_id=instance.uuid, tag='foo')
        tag.create()
        # 'tags' must not be set until it is accessed.
        self.assertNotIn('tags', instance)
        self.assertEqual(1, len(instance.tags))
        self.assertEqual('foo', instance.tags[0].tag)
    @mock.patch('nova.objects.instance.LOG.exception')
    def test_save_does_not_log_exception_after_tags_loaded(self, mock_log):
        # save() cannot persist the (read-only) tags attribute; make sure
        # that having lazily loaded it does not cause save() to log errors.
        instance = objects.Instance(self.context, uuid=uuids.instance,
                                    user_id=self.context.user_id,
                                    project_id=self.context.project_id)
        instance.create()
        tag = objects.Tag(self.context, resource_id=instance.uuid, tag='foo')
        tag.create()

        # this will lazy load tags so instance.tags will be set
        self.assertEqual(1, len(instance.tags))

        # instance.save will try to find a way to save tags but is should not
        # spam the log with errors
        instance.display_name = 'foobar'
        instance.save()

        self.assertFalse(mock_log.called)
    @mock.patch.object(db, 'instance_get')
    def test_get_by_id(self, mock_get):
        # get_by_id fetches by integer id and joins the default columns.
        mock_get.return_value = self.fake_instance
        inst = objects.Instance.get_by_id(self.context, 'instid')
        self.assertEqual(self.fake_instance['uuid'], inst.uuid)
        mock_get.assert_called_once_with(self.context, 'instid',
            columns_to_join=['info_cache', 'security_groups'])
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_load(self, mock_get):
        # Lazy-loading 'metadata' triggers exactly one extra DB fetch; a
        # second access must be served from the cached attribute.
        fake_uuid = self.fake_instance['uuid']
        fake_inst2 = dict(self.fake_instance,
                          metadata=[{'key': 'foo', 'value': 'bar'}])
        mock_get.side_effect = [self.fake_instance, fake_inst2]
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertFalse(hasattr(inst, '_obj_metadata'))
        meta = inst.metadata
        self.assertEqual({'foo': 'bar'}, meta)
        self.assertTrue(hasattr(inst, '_obj_metadata'))
        # Make sure we don't run load again
        meta2 = inst.metadata
        self.assertEqual({'foo': 'bar'}, meta2)
        # First call is the initial fetch, second is the metadata lazy-load.
        call_list = [mock.call(self.context, fake_uuid,
                               columns_to_join=['info_cache',
                                                'security_groups']),
                     mock.call(self.context, fake_uuid,
                               columns_to_join=['metadata']),
                     ]
        mock_get.assert_has_calls(call_list, any_order=False)
def test_load_invalid(self):
inst = objects.Instance(context=self.context, uuid=uuids.instance)
self.assertRaises(exception.ObjectActionError,
inst.obj_load_attr, 'foo')
    def test_create_and_load_keypairs_from_extra(self):
        # Keypairs set at create() time are persisted to instance_extra and
        # can be eagerly loaded via expected_attrs.
        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                project_id=self.context.project_id)
        inst.keypairs = objects.KeyPairList(objects=[
            objects.KeyPair(name='foo')])
        inst.create()

        inst = objects.Instance.get_by_uuid(self.context, inst.uuid,
                                            expected_attrs=['keypairs'])
        self.assertEqual('foo', inst.keypairs[0].name)
    def test_lazy_load_keypairs_from_extra(self):
        # Keypairs stored in instance_extra lazy-load cleanly and must not
        # show up as a pending change afterwards.
        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                project_id=self.context.project_id)
        inst.keypairs = objects.KeyPairList(objects=[
            objects.KeyPair(name='foo')])
        inst.create()

        inst = objects.Instance.get_by_uuid(self.context, inst.uuid)
        self.assertNotIn('keypairs', inst)
        self.assertEqual('foo', inst.keypairs[0].name)
        self.assertNotIn('keypairs', inst.obj_what_changed())
    @mock.patch('nova.objects.KeyPair.get_by_name')
    def test_lazy_load_keypairs_from_legacy(self, mock_get):
        # For instances created before keypairs were stored in
        # instance_extra, lazy-loading falls back to KeyPair.get_by_name via
        # the legacy 'key_name' field; the loaded value is then a change to
        # be written back on the next save.
        mock_get.return_value = objects.KeyPair(name='foo')

        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                key_name='foo',
                                project_id=self.context.project_id)
        inst.create()

        inst = objects.Instance.get_by_uuid(self.context, inst.uuid)
        self.assertNotIn('keypairs', inst)
        self.assertEqual('foo', inst.keypairs[0].name)
        self.assertIn('keypairs', inst.obj_what_changed())
        mock_get.assert_called_once_with(self.context,
                                         inst.user_id,
                                         inst.key_name,
                                         localonly=True)
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_get_remote(self, mock_get):
        # isotime doesn't have microseconds and is always UTC
        fake_instance = self.fake_instance
        mock_get.return_value = fake_instance
        inst = objects.Instance.get_by_uuid(self.context, uuids.instance)
        self.assertEqual(fake_instance['id'], inst.id)
        # launched_at comes back tz-aware; strip tzinfo to compare with the
        # naive DB value.
        self.assertEqual(fake_instance['launched_at'],
                         inst.launched_at.replace(tzinfo=None))
        self.assertEqual(fake_instance['access_ip_v4'],
                         str(inst.access_ip_v4))
        self.assertEqual(fake_instance['access_ip_v6'],
                         str(inst.access_ip_v6))
        mock_get.assert_called_once_with(self.context, uuids.instance,
            columns_to_join=['info_cache', 'security_groups'])
    @mock.patch.object(instance_info_cache.InstanceInfoCache, 'refresh')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_refresh(self, mock_get, mock_refresh):
        # refresh() re-fetches the instance, picks up changed fields, leaves
        # no pending changes, and refreshes the info cache.
        fake_uuid = self.fake_instance['uuid']
        fake_inst = dict(self.fake_instance, host='orig-host')
        fake_inst2 = dict(self.fake_instance, host='new-host')
        mock_get.side_effect = [fake_inst, fake_inst2]
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual('orig-host', inst.host)
        inst.refresh()
        self.assertEqual('new-host', inst.host)
        self.assertEqual(set([]), inst.obj_what_changed())
        get_call_list = [mock.call(self.context, fake_uuid,
                                   columns_to_join=['info_cache',
                                                    'security_groups']),
                         mock.call(self.context, fake_uuid,
                                   columns_to_join=['info_cache',
                                                    'security_groups']),
                         ]
        mock_get.assert_has_calls(get_call_list, any_order=False)
        mock_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_refresh_does_not_recurse(self, mock_get):
        # If refresh() gets back a copy with no context it must raise
        # OrphanedObjectError instead of recursing into another refresh.
        inst = objects.Instance(context=self.context, uuid=uuids.instance,
                                metadata={})
        inst_copy = objects.Instance()
        inst_copy.uuid = inst.uuid
        mock_get.return_value = inst_copy
        self.assertRaises(exception.OrphanedObjectError, inst.refresh)
        mock_get.assert_called_once_with(self.context, uuid=inst.uuid,
            expected_attrs=['metadata'], use_slave=False)
    def _save_test_helper(self, cell_type, save_kwargs):
        """Common code for testing save() for cells/non-cells.

        Records the expected DB and cells-RPC interactions with mox, then
        mutates vm_state/task_state/user_data and calls save() with
        ``save_kwargs``, asserting the updated values stick.
        """
        if cell_type:
            self.flags(enable=True, cell_type=cell_type, group='cells')
        else:
            self.flags(enable=False, group='cells')

        old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
                       vm_state='old', task_state='old')
        fake_uuid = old_ref['uuid']

        expected_updates = dict(vm_state='meow', task_state='wuff',
                                user_data='new')

        new_ref = dict(old_ref, host='newhost', **expected_updates)
        exp_vm_state = save_kwargs.get('expected_vm_state')
        exp_task_state = save_kwargs.get('expected_task_state')
        admin_reset = save_kwargs.get('admin_state_reset', False)
        if exp_vm_state:
            expected_updates['expected_vm_state'] = exp_vm_state
        if exp_task_state:
            # Old (1.9) Instance versions expand 'image_snapshot' to also
            # accept the 'image_snapshot_pending' task state.
            if (exp_task_state == 'image_snapshot' and
                    'instance_version' in save_kwargs and
                    save_kwargs['instance_version'] == '1.9'):
                expected_updates['expected_task_state'] = [
                    'image_snapshot', 'image_snapshot_pending']
            else:
                expected_updates['expected_task_state'] = exp_task_state
        # mox record phase: stub the DB and cells RPC layers and declare the
        # exact calls save() is expected to make (order matters).
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_info_cache_update')
        cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
        self.mox.StubOutWithMock(cells_api_mock,
                                 'instance_update_at_top')
        self.mox.StubOutWithMock(cells_api_mock,
                                 'instance_update_from_api')
        self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(notifications, 'send_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(old_ref)
        db.instance_update_and_get_original(
            self.context, fake_uuid, expected_updates,
            columns_to_join=['info_cache', 'security_groups',
                             'system_metadata', 'extra', 'extra.flavor']
            ).AndReturn((old_ref, new_ref))
        # API cells push updates down; compute cells push updates up.
        if cell_type == 'api':
            cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
            cells_api_mock.instance_update_from_api(
                    self.context, mox.IsA(objects.Instance),
                    exp_vm_state, exp_task_state, admin_reset)
        elif cell_type == 'compute':
            cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
            cells_api_mock.instance_update_at_top(self.context,
                                                  mox.IsA(objects.Instance))
        notifications.send_update(self.context, mox.IgnoreArg(),
                                  mox.IgnoreArg())

        self.mox.ReplayAll()

        inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'])
        if 'instance_version' in save_kwargs:
            inst.VERSION = save_kwargs.pop('instance_version')
        self.assertEqual('old', inst.task_state)
        self.assertEqual('old', inst.vm_state)
        self.assertEqual('old', inst.user_data)
        inst.vm_state = 'meow'
        inst.task_state = 'wuff'
        inst.user_data = 'new'
        save_kwargs.pop('context', None)
        inst.save(**save_kwargs)
        self.assertEqual('newhost', inst.host)
        self.assertEqual('meow', inst.vm_state)
        self.assertEqual('wuff', inst.task_state)
        self.assertEqual('new', inst.user_data)
        # NOTE(danms): Ignore flavor migrations for the moment
        self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor']))
    # Thin wrappers over _save_test_helper covering the cells/non-cells
    # configurations and the expected-state keyword combinations.
    def test_save(self):
        self._save_test_helper(None, {})

    def test_save_in_api_cell(self):
        self._save_test_helper('api', {})

    def test_save_in_compute_cell(self):
        self._save_test_helper('compute', {})

    def test_save_exp_vm_state(self):
        self._save_test_helper(None, {'expected_vm_state': ['meow']})

    def test_save_exp_task_state(self):
        self._save_test_helper(None, {'expected_task_state': ['meow']})

    def test_save_exp_vm_state_api_cell(self):
        self._save_test_helper('api', {'expected_vm_state': ['meow']})

    def test_save_exp_task_state_api_cell(self):
        self._save_test_helper('api', {'expected_task_state': ['meow']})

    def test_save_exp_task_state_api_cell_admin_reset(self):
        self._save_test_helper('api', {'admin_state_reset': True})
    @mock.patch.object(db, 'instance_update_and_get_original')
    @mock.patch.object(db, 'instance_get_by_uuid')
    @mock.patch.object(notifications, 'send_update')
    def test_save_rename_sends_notification(self, mock_send, mock_get,
                                            mock_update_and_get):
        # Tests that simply changing the 'display_name' on the instance
        # will send a notification.
        self.flags(enable=False, group='cells')
        old_ref = dict(self.fake_instance, display_name='hello')
        fake_uuid = old_ref['uuid']
        expected_updates = dict(display_name='goodbye')
        new_ref = dict(old_ref, **expected_updates)
        mock_get.return_value = old_ref
        mock_update_and_get.return_value = (old_ref, new_ref)
        inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'],
                                            use_slave=False)
        self.assertEqual('hello', inst.display_name)
        inst.display_name = 'goodbye'
        inst.save()
        self.assertEqual('goodbye', inst.display_name)
        # NOTE(danms): Ignore flavor migrations for the moment
        self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor']))
        mock_get.assert_called_once_with(self.context, fake_uuid,
            columns_to_join=['info_cache', 'security_groups'])
        mock_update_and_get.assert_called_once_with(self.context, fake_uuid,
            expected_updates, columns_to_join=['info_cache', 'security_groups',
            'system_metadata', 'extra', 'extra.flavor'])
        mock_send.assert_called_once_with(self.context, mock.ANY, mock.ANY)
    @mock.patch('nova.db.instance_extra_update_by_uuid')
    def test_save_object_pci_requests(self, mock_instance_extra_update):
        """pci_requests changes persist via instance_extra as JSON; clearing
        writes None; a no-op save after obj_reset_changes writes nothing."""
        expected_json = ('[{"count": 1, "alias_name": null, "is_new": false,'
                         '"request_id": null, "spec": [{"vendor_id": "8086",'
                         '"product_id": "1502"}]}]')
        inst = objects.Instance()
        inst = objects.Instance._from_db_object(self.context, inst,
                                                self.fake_instance)
        inst.obj_reset_changes()
        pci_req_obj = objects.InstancePCIRequest(
            count=1, spec=[{'vendor_id': '8086', 'product_id': '1502'}])
        inst.pci_requests = (
            objects.InstancePCIRequests(requests=[pci_req_obj]))
        inst.pci_requests.instance_uuid = inst.uuid
        inst.save()
        mock_instance_extra_update.assert_called_once_with(
            self.context, inst.uuid, mock.ANY)
        # Third positional arg is the extra-values dict passed to the DB API.
        actual_args = (
            mock_instance_extra_update.call_args[0][2]['pci_requests'])
        mock_instance_extra_update.reset_mock()
        self.assertJsonEqual(expected_json, actual_args)
        inst.pci_requests = None
        inst.save()
        mock_instance_extra_update.assert_called_once_with(
            self.context, inst.uuid, {'pci_requests': None})
        mock_instance_extra_update.reset_mock()
        inst.obj_reset_changes()
        inst.save()
        self.assertFalse(mock_instance_extra_update.called)
    @mock.patch('nova.db.instance_update_and_get_original')
    @mock.patch.object(instance.Instance, '_from_db_object')
    def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
        """save() must not ask _from_db_object to reload pci_devices."""
        # NOTE(danms): This tests that we don't update the pci_devices
        # field from the contents of the database. This is not because we
        # don't necessarily want to, but because the way pci_devices is
        # currently implemented it causes versioning issues. When that is
        # resolved, this test should go away.
        mock_update.return_value = None, None
        inst = objects.Instance(context=self.context, id=123)
        inst.uuid = uuids.test_instance_not_refresh
        inst.pci_devices = pci_device.PciDeviceList()
        inst.save()
        self.assertNotIn('pci_devices',
                         mock_fdo.call_args_list[0][1]['expected_attrs'])
    @mock.patch('nova.db.instance_extra_update_by_uuid')
    @mock.patch('nova.db.instance_update_and_get_original')
    @mock.patch.object(instance.Instance, '_from_db_object')
    def test_save_updates_numa_topology(self, mock_fdo, mock_update,
                                        mock_extra_update):
        """numa_topology changes are serialized into instance_extra, and
        setting it to None clears the stored value."""
        fake_obj_numa_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([1]), memory=128)])
        fake_obj_numa_topology.instance_uuid = uuids.instance
        jsonified = fake_obj_numa_topology._to_json()
        mock_update.return_value = None, None
        inst = objects.Instance(
            context=self.context, id=123, uuid=uuids.instance)
        inst.numa_topology = fake_obj_numa_topology
        inst.save()
        # NOTE(sdague): the json representation of nova object for
        # NUMA isn't stable from a string comparison
        # perspective. There are sets which get converted to lists,
        # and based on platform differences may show up in different
        # orders. So we can't have mock do the comparison. Instead
        # manually compare the final parameter using our json equality
        # operator which does the right thing here.
        mock_extra_update.assert_called_once_with(
            self.context, inst.uuid, mock.ANY)
        called_arg = mock_extra_update.call_args_list[0][0][2]['numa_topology']
        self.assertJsonEqual(called_arg, jsonified)
        mock_extra_update.reset_mock()
        inst.numa_topology = None
        inst.save()
        mock_extra_update.assert_called_once_with(
            self.context, inst.uuid, {'numa_topology': None})
    @mock.patch('nova.db.instance_extra_update_by_uuid')
    def test_save_vcpu_model(self, mock_update):
        """vcpu_model is JSON-serialized into instance_extra on save and
        cleared with a literal None."""
        inst = fake_instance.fake_instance_obj(self.context)
        inst.vcpu_model = test_vcpu_model.fake_vcpumodel
        inst.save()
        self.assertTrue(mock_update.called)
        self.assertEqual(1, mock_update.call_count)
        actual_args = mock_update.call_args
        self.assertEqual(self.context, actual_args[0][0])
        self.assertEqual(inst.uuid, actual_args[0][1])
        self.assertEqual(['vcpu_model'], list(actual_args[0][2].keys()))
        self.assertJsonEqual(jsonutils.dumps(
            test_vcpu_model.fake_vcpumodel.obj_to_primitive()),
            actual_args[0][2]['vcpu_model'])
        mock_update.reset_mock()
        inst.vcpu_model = None
        inst.save()
        mock_update.assert_called_once_with(
            self.context, inst.uuid, {'vcpu_model': None})
    @mock.patch('nova.db.instance_extra_update_by_uuid')
    def test_save_migration_context_model(self, mock_update):
        """migration_context is persisted to instance_extra in a form that
        round-trips through MigrationContext.obj_from_db_obj, and is
        cleared with a literal None."""
        inst = fake_instance.fake_instance_obj(self.context)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.save()
        self.assertTrue(mock_update.called)
        self.assertEqual(1, mock_update.call_count)
        actual_args = mock_update.call_args
        self.assertEqual(self.context, actual_args[0][0])
        self.assertEqual(inst.uuid, actual_args[0][1])
        self.assertEqual(['migration_context'], list(actual_args[0][2].keys()))
        self.assertIsInstance(
            objects.MigrationContext.obj_from_db_obj(
                actual_args[0][2]['migration_context']),
            objects.MigrationContext)
        mock_update.reset_mock()
        inst.migration_context = None
        inst.save()
        mock_update.assert_called_once_with(
            self.context, inst.uuid, {'migration_context': None})
def test_save_flavor_skips_unchanged_flavors(self):
inst = objects.Instance(context=self.context,
flavor=objects.Flavor())
inst.obj_reset_changes()
with mock.patch('nova.db.instance_extra_update_by_uuid') as mock_upd:
inst.save()
self.assertFalse(mock_upd.called)
    @mock.patch('nova.db.instance_extra_update_by_uuid')
    def test_save_multiple_extras_updates_once(self, mock_update):
        """Several changed extra fields coalesce into one DB update call."""
        inst = fake_instance.fake_instance_obj(self.context)
        inst.numa_topology = None
        inst.migration_context = None
        inst.vcpu_model = test_vcpu_model.fake_vcpumodel
        inst.save()
        json_vcpu_model = jsonutils.dumps(
            test_vcpu_model.fake_vcpumodel.obj_to_primitive())
        expected_vals = {'numa_topology': None,
                         'migration_context': None,
                         'vcpu_model': json_vcpu_model}
        mock_update.assert_called_once_with(self.context, inst.uuid,
                                            expected_vals)
    @mock.patch.object(notifications, 'send_update')
    @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_from_api')
    @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_at_top')
    @mock.patch.object(db, 'instance_update_and_get_original')
    def _test_skip_cells_sync_helper(self, mock_db_update, mock_update_at_top,
            mock_update_from_api, mock_notif_update, cell_type):
        """Verify skip_cells_sync() suppresses cell sync and notifications
        during the context, and that a subsequent normal save syncs per
        cell_type ('api' or 'compute')."""
        self.flags(enable=True, cell_type=cell_type, group='cells')
        inst = fake_instance.fake_instance_obj(self.context, cell_name='fake')
        inst.vm_state = 'foo'
        inst.task_state = 'bar'
        inst.cell_name = 'foo!bar@baz'
        old_ref = dict(base.obj_to_primitive(inst), vm_state='old',
                       task_state='old')
        new_ref = dict(old_ref, vm_state='foo', task_state='bar')
        newer_ref = dict(new_ref, vm_state='bar', task_state='foo')
        # Two saves below -> two (old, new) pairs from the DB layer.
        mock_db_update.side_effect = [(old_ref, new_ref), (new_ref, newer_ref)]
        with inst.skip_cells_sync():
            inst.save()
        mock_update_at_top.assert_has_calls([])
        mock_update_from_api.assert_has_calls([])
        self.assertFalse(mock_notif_update.called)
        inst.vm_state = 'bar'
        inst.task_state = 'foo'
        def fake_update_from_api(context, instance, expected_vm_state,
                                 expected_task_state, admin_state_reset):
            self.assertEqual('foo!bar@baz', instance.cell_name)
        # This is re-mocked so that cell_name can be checked above. Since
        # instance objects have no equality testing assert_called_once_with
        # doesn't work.
        # NOTE(review): the 'as' alias deliberately shadows the local
        # fake_update_from_api function defined just above; the side_effect
        # was already bound, so only the name is reused for assertions.
        with mock.patch.object(cells_rpcapi.CellsAPI,
                               'instance_update_from_api',
                               side_effect=fake_update_from_api) as fake_update_from_api:
            inst.save()
        self.assertEqual('foo!bar@baz', inst.cell_name)
        self.assertTrue(mock_notif_update.called)
        if cell_type == 'compute':
            mock_update_at_top.assert_called_once_with(self.context, mock.ANY)
            # Compare primitives since we can't check instance object equality
            expected_inst_p = base.obj_to_primitive(inst)
            actual_inst = mock_update_at_top.call_args[0][1]
            actual_inst_p = base.obj_to_primitive(actual_inst)
            self.assertEqual(expected_inst_p, actual_inst_p)
            self.assertFalse(fake_update_from_api.called)
        elif cell_type == 'api':
            self.assertFalse(mock_update_at_top.called)
            fake_update_from_api.assert_called_once_with(self.context,
                                                         mock.ANY, None, None,
                                                         False)
        expected_calls = [
            mock.call(self.context, inst.uuid,
                      {'vm_state': 'foo', 'task_state': 'bar',
                       'cell_name': 'foo!bar@baz'},
                      columns_to_join=['system_metadata', 'extra',
                                       'extra.flavor']),
            mock.call(self.context, inst.uuid,
                      {'vm_state': 'bar', 'task_state': 'foo'},
                      columns_to_join=['system_metadata'])]
        mock_db_update.assert_has_calls(expected_calls)
def test_skip_cells_api(self):
self._test_skip_cells_sync_helper(cell_type='api')
def test_skip_cells_compute(self):
self._test_skip_cells_sync_helper(cell_type='compute')
@mock.patch.object(db, 'instance_get_by_uuid')
def test_get_deleted(self, mock_get):
fake_inst = dict(self.fake_instance, id=123, deleted=123)
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
# NOTE(danms): Make sure it's actually a bool
self.assertTrue(inst.deleted)
mock_get.assert_called_once_with(self.context, fake_uuid,
columns_to_join=['info_cache', 'security_groups'])
@mock.patch.object(db, 'instance_get_by_uuid')
def test_get_not_cleaned(self, mock_get):
fake_inst = dict(self.fake_instance, id=123, cleaned=None)
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
# NOTE(mikal): Make sure it's actually a bool
self.assertFalse(inst.cleaned)
mock_get.assert_called_once_with(self.context, fake_uuid,
columns_to_join=['info_cache', 'security_groups'])
@mock.patch.object(db, 'instance_get_by_uuid')
def test_get_cleaned(self, mock_get):
fake_inst = dict(self.fake_instance, id=123, cleaned=1)
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
# NOTE(mikal): Make sure it's actually a bool
self.assertTrue(inst.cleaned)
mock_get.assert_called_once_with(self.context, fake_uuid,
columns_to_join=['info_cache', 'security_groups'])
    @mock.patch.object(db, 'instance_update_and_get_original')
    @mock.patch.object(db, 'instance_info_cache_update')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_with_info_cache(self, mock_get, mock_upd_cache, mock_upd_and_get):
        """Changing only info_cache.network_info updates the cache table
        without touching the instance row itself."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
        nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
        nwinfo1_json = nwinfo1.json()
        nwinfo2_json = nwinfo2.json()
        fake_info_cache = test_instance_info_cache.fake_info_cache
        fake_inst['info_cache'] = dict(
            fake_info_cache,
            network_info=nwinfo1_json,
            instance_uuid=fake_uuid)
        mock_get.return_value = fake_inst
        mock_upd_cache.return_value = fake_info_cache
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(nwinfo1, inst.info_cache.network_info)
        self.assertEqual(fake_uuid, inst.info_cache.instance_uuid)
        inst.info_cache.network_info = nwinfo2
        inst.save()
        mock_get.assert_called_once_with(self.context, fake_uuid,
            columns_to_join=['info_cache', 'security_groups'])
        mock_upd_cache.assert_called_once_with(self.context, fake_uuid,
            {'network_info': nwinfo2_json})
        self.assertFalse(mock_upd_and_get.called)
@mock.patch.object(db, 'instance_get_by_uuid')
def test_with_info_cache_none(self, mock_get):
fake_inst = dict(self.fake_instance, info_cache=None)
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
['info_cache'])
self.assertIsNone(inst.info_cache)
mock_get.assert_called_once_with(self.context, fake_uuid,
columns_to_join=['info_cache'])
    @mock.patch.object(db, 'security_group_update')
    @mock.patch.object(db, 'instance_update_and_get_original')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_with_security_groups(self, mock_get, mock_upd_and_get,
                                  mock_upd_secgrp):
        """security_groups hydrate as SecurityGroup objects, and editing one
        group updates only the secgroup table, not the instance row."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['security_groups'] = [
            {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            ]
        mock_get.return_value = fake_inst
        mock_upd_secgrp.return_value = fake_inst['security_groups'][0]
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(2, len(inst.security_groups))
        for index, group in enumerate(fake_inst['security_groups']):
            for key in group:
                self.assertEqual(group[key],
                                 getattr(inst.security_groups[index], key))
                self.assertIsInstance(inst.security_groups[index],
                                      security_group.SecurityGroup)
        self.assertEqual(set(), inst.security_groups.obj_what_changed())
        inst.security_groups[0].description = 'changed'
        inst.save()
        self.assertEqual(set(), inst.security_groups.obj_what_changed())
        mock_get.assert_called_once_with(self.context, fake_uuid,
            columns_to_join=['info_cache', 'security_groups'])
        mock_upd_secgrp.assert_called_once_with(self.context, 1,
            {'description': 'changed'})
        self.assertFalse(mock_upd_and_get.called)
@mock.patch.object(db, 'instance_get_by_uuid')
def test_with_empty_security_groups(self, mock_get):
fake_inst = dict(self.fake_instance, security_groups=[])
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
self.assertEqual(0, len(inst.security_groups))
mock_get.assert_called_once_with(self.context, fake_uuid,
columns_to_join=['info_cache', 'security_groups'])
@mock.patch.object(db, 'instance_get_by_uuid')
def test_with_empty_pci_devices(self, mock_get):
fake_inst = dict(self.fake_instance, pci_devices=[])
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
['pci_devices'])
self.assertEqual(0, len(inst.pci_devices))
mock_get.assert_called_once_with(self.context, fake_uuid,
columns_to_join=['pci_devices'])
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_with_pci_devices(self, mock_get):
        """pci_devices rows hydrate with instance_uuid carried through."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['pci_devices'] = [
            {'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 2,
             'compute_node_id': 1,
             'address': 'a1',
             'vendor_id': 'v1',
             'numa_node': 0,
             'product_id': 'p1',
             'dev_type': fields.PciDeviceType.STANDARD,
             'status': fields.PciDeviceStatus.ALLOCATED,
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'request_id': None,
             'parent_addr': None,
             'extra_info': '{}'},
            {
             'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 1,
             'compute_node_id': 1,
             'address': 'a',
             'vendor_id': 'v',
             'numa_node': 1,
             'product_id': 'p',
             'dev_type': fields.PciDeviceType.STANDARD,
             'status': fields.PciDeviceStatus.ALLOCATED,
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'request_id': None,
             'parent_addr': 'a1',
             'extra_info': '{}'},
            ]
        mock_get.return_value = fake_inst
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            ['pci_devices'])
        self.assertEqual(2, len(inst.pci_devices))
        self.assertEqual(fake_uuid, inst.pci_devices[0].instance_uuid)
        self.assertEqual(fake_uuid, inst.pci_devices[1].instance_uuid)
        mock_get.assert_called_once_with(self.context, fake_uuid,
                                         columns_to_join=['pci_devices'])
    @mock.patch.object(db, 'instance_fault_get_by_instance_uuids')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_with_fault(self, mock_get, mock_fault_get):
        """expected_attrs=['fault'] attaches the first fault returned by the
        fault table for the instance."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_faults = [dict(x, instance_uuid=fake_uuid)
                       for x in test_instance_fault.fake_faults['fake-uuid']]
        mock_get.return_value = self.fake_instance
        mock_fault_get.return_value = {fake_uuid: fake_faults}
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            expected_attrs=['fault'])
        self.assertEqual(fake_faults[0], dict(inst.fault.items()))
        mock_get.assert_called_once_with(self.context, fake_uuid,
                                         columns_to_join=[])
        mock_fault_get.assert_called_once_with(self.context, [fake_uuid])
    @mock.patch('nova.objects.EC2Ids.get_by_instance')
    @mock.patch('nova.db.instance_get_by_uuid')
    def test_with_ec2_ids(self, mock_get, mock_ec2):
        """expected_attrs=['ec2_ids'] loads EC2 ids via EC2Ids.get_by_instance."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        mock_get.return_value = fake_inst
        fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                      ami_id='fake-ami')
        mock_ec2.return_value = fake_ec2_ids
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            expected_attrs=['ec2_ids'])
        mock_ec2.assert_called_once_with(self.context, mock.ANY)
        self.assertEqual(fake_ec2_ids.instance_id, inst.ec2_ids.instance_id)
    @mock.patch('nova.db.instance_get_by_uuid')
    def test_with_image_meta(self, mock_get):
        """expected_attrs=['image_meta'] yields an ImageMeta object whose
        fields match the fixture instance's image properties."""
        fake_inst = dict(self.fake_instance)
        mock_get.return_value = fake_inst
        inst = instance.Instance.get_by_uuid(self.context,
                                             fake_inst['uuid'],
                                             expected_attrs=['image_meta'])
        image_meta = inst.image_meta
        self.assertIsInstance(image_meta, objects.ImageMeta)
        self.assertEqual(100, image_meta.min_ram)
        self.assertEqual('ide', image_meta.properties.hw_disk_bus)
        self.assertEqual('ne2k_pci', image_meta.properties.hw_vif_model)
    def test_iteritems_with_extra_attrs(self):
        """items() includes dynamic attributes such as the stubbed name."""
        self.stub_out('nova.objects.Instance.name', 'foo')
        inst = objects.Instance(uuid=uuids.instance)
        self.assertEqual(sorted({'uuid': uuids.instance,
                                 'name': 'foo',
                                 }.items()), sorted(inst.items()))
    def _test_metadata_change_tracking(self, which):
        """Mutating the named metadata dict in place marks the field dirty,
        and obj_reset_changes() clears the tracking again.

        :param which: field name, 'metadata' or 'system_metadata'
        """
        inst = objects.Instance(uuid=uuids.instance)
        setattr(inst, which, {})
        inst.obj_reset_changes()
        getattr(inst, which)['foo'] = 'bar'
        self.assertEqual(set([which]), inst.obj_what_changed())
        inst.obj_reset_changes()
        self.assertEqual(set(), inst.obj_what_changed())
    @mock.patch.object(db, 'instance_create')
    def test_create_skip_scheduled_at(self, mock_create):
        """A scheduled_at=None kwarg is dropped from the values passed to
        db.instance_create."""
        vals = {'host': 'foo-host',
                'deleted': 0,
                'memory_mb': 128,
                'system_metadata': {'foo': 'bar'},
                'extra': {
                    'vcpu_model': None,
                    'numa_topology': None,
                    'pci_requests': None,
                    'device_metadata': None,
                }}
        fake_inst = fake_instance.fake_db_instance(**vals)
        mock_create.return_value = fake_inst
        inst = objects.Instance(context=self.context,
                                host='foo-host', memory_mb=128,
                                scheduled_at=None,
                                system_metadata={'foo': 'bar'})
        inst.create()
        self.assertEqual('foo-host', inst.host)
        mock_create.assert_called_once_with(self.context, vals)
def test_metadata_change_tracking(self):
self._test_metadata_change_tracking('metadata')
def test_system_metadata_change_tracking(self):
self._test_metadata_change_tracking('system_metadata')
    @mock.patch.object(db, 'instance_create')
    def test_create_stubbed(self, mock_create):
        """create() passes set fields plus a default 'extra' dict of None
        values to db.instance_create."""
        vals = {'host': 'foo-host',
                'deleted': 0,
                'memory_mb': 128,
                'system_metadata': {'foo': 'bar'},
                'extra': {
                    'vcpu_model': None,
                    'numa_topology': None,
                    'pci_requests': None,
                    'device_metadata': None,
                }}
        fake_inst = fake_instance.fake_db_instance(**vals)
        mock_create.return_value = fake_inst
        inst = objects.Instance(context=self.context,
                                host='foo-host', memory_mb=128,
                                system_metadata={'foo': 'bar'})
        inst.create()
        mock_create.assert_called_once_with(self.context, vals)
    @mock.patch.object(db, 'instance_create')
    def test_create(self, mock_create):
        """A bare create() still sends deleted=0 and the None-filled 'extra'
        dict, and ec2_ids is populated afterwards."""
        extras = {'vcpu_model': None,
                  'numa_topology': None,
                  'pci_requests': None,
                  'device_metadata': None}
        mock_create.return_value = self.fake_instance
        inst = objects.Instance(context=self.context)
        inst.create()
        self.assertEqual(self.fake_instance['id'], inst.id)
        self.assertIsNotNone(inst.ec2_ids)
        mock_create.assert_called_once_with(self.context, {'deleted': 0,
                                                           'extra': extras})
def test_create_with_values(self):
inst1 = objects.Instance(context=self.context,
user_id=self.context.user_id,
project_id=self.context.project_id,
host='foo-host')
inst1.create()
self.assertEqual('foo-host', inst1.host)
inst2 = objects.Instance.get_by_uuid(self.context, inst1.uuid)
self.assertEqual('foo-host', inst2.host)
def test_create_deleted(self):
inst1 = objects.Instance(context=self.context,
user_id=self.context.user_id,
project_id=self.context.project_id,
deleted=True)
self.assertRaises(exception.ObjectActionError, inst1.create)
    def test_create_with_extras(self):
        """create() persists numa_topology, pci_requests and vcpu_model so
        each can be fetched back through its own get_by_instance_uuid."""
        inst = objects.Instance(context=self.context,
            uuid=self.fake_instance['uuid'],
            numa_topology=test_instance_numa_topology.fake_obj_numa_topology,
            pci_requests=objects.InstancePCIRequests(
                requests=[
                    objects.InstancePCIRequest(count=123,
                                               spec=[])]),
            vcpu_model=test_vcpu_model.fake_vcpumodel,
            )
        inst.create()
        self.assertIsNotNone(inst.numa_topology)
        self.assertIsNotNone(inst.pci_requests)
        self.assertEqual(1, len(inst.pci_requests.requests))
        self.assertIsNotNone(inst.vcpu_model)
        got_numa_topo = objects.InstanceNUMATopology.get_by_instance_uuid(
            self.context, inst.uuid)
        self.assertEqual(inst.numa_topology.instance_uuid,
                         got_numa_topo.instance_uuid)
        got_pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
            self.context, inst.uuid)
        self.assertEqual(123, got_pci_requests.requests[0].count)
        vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
            self.context, inst.uuid)
        self.assertEqual('fake-model', vcpu_model.model)
def test_recreate_fails(self):
inst = objects.Instance(context=self.context,
user_id=self.context.user_id,
project_id=self.context.project_id,
host='foo-host')
inst.create()
self.assertRaises(exception.ObjectActionError, inst.create)
    @mock.patch.object(db, 'instance_create')
    def test_create_with_special_things(self, mock_create):
        """security_groups flatten to a list of names and info_cache to its
        serialized network_info when passed to db.instance_create."""
        fake_inst = fake_instance.fake_db_instance()
        mock_create.return_value = fake_inst
        secgroups = security_group.SecurityGroupList()
        secgroups.objects = []
        for name in ('foo', 'bar'):
            secgroup = security_group.SecurityGroup()
            secgroup.name = name
            secgroups.objects.append(secgroup)
        info_cache = instance_info_cache.InstanceInfoCache()
        info_cache.network_info = network_model.NetworkInfo()
        inst = objects.Instance(context=self.context,
                                host='foo-host', security_groups=secgroups,
                                info_cache=info_cache)
        inst.create()
        mock_create.assert_called_once_with(self.context,
                       {'host': 'foo-host',
                        'deleted': 0,
                        'security_groups': ['foo', 'bar'],
                        'info_cache': {'network_info': '[]'},
                        'extra': {
                            'vcpu_model': None,
                            'numa_topology': None,
                            'pci_requests': None,
                            'device_metadata': None,
                        },
                        })
    @mock.patch.object(db, 'instance_destroy')
    def test_destroy_stubbed(self, mock_destroy):
        """destroy() copies deleted/deleted_at from the DB result onto the
        object and passes constraint=None when host is set but unchanged."""
        deleted_at = datetime.datetime(1955, 11, 6)
        fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
                                                   deleted=True)
        mock_destroy.return_value = fake_inst
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance, host='foo')
        inst.destroy()
        self.assertEqual(timeutils.normalize_time(deleted_at),
                         timeutils.normalize_time(inst.deleted_at))
        self.assertTrue(inst.deleted)
        mock_destroy.assert_called_once_with(self.context, uuids.instance,
                                             constraint=None)
def test_destroy(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id}
db_inst = db.instance_create(self.context, values)
inst = objects.Instance(context=self.context, id=db_inst['id'],
uuid=db_inst['uuid'])
inst.destroy()
self.assertRaises(exception.InstanceNotFound,
db.instance_get_by_uuid, self.context,
db_inst['uuid'])
def test_destroy_host_constraint(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
inst.host = None
self.assertRaises(exception.ObjectActionError,
inst.destroy)
    @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
    @mock.patch.object(db, 'instance_destroy')
    def test_destroy_cell_sync_to_top(self, mock_destroy,
                                      mock_destroy_at_top):
        """In a compute cell, destroy() syncs the deletion to the top cell."""
        self.flags(enable=True, cell_type='compute', group='cells')
        fake_inst = fake_instance.fake_db_instance(deleted=True)
        mock_destroy.return_value = fake_inst
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance)
        inst.destroy()
        mock_destroy_at_top.assert_called_once_with(self.context, mock.ANY)
        actual_inst = mock_destroy_at_top.call_args[0][1]
        self.assertIsInstance(actual_inst, instance.Instance)
    @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
    @mock.patch.object(db, 'instance_destroy')
    def test_destroy_no_cell_sync_to_top(self, mock_destroy,
                                         mock_destroy_at_top):
        """With cells disabled, destroy() does not sync to the top cell."""
        fake_inst = fake_instance.fake_db_instance(deleted=True)
        mock_destroy.return_value = fake_inst
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance)
        inst.destroy()
        self.assertFalse(mock_destroy_at_top.called)
    def test_name_does_not_trigger_lazy_loads(self):
        """Reading .name via the template must not lazy-load other fields
        (checked here via the 'fault' attribute)."""
        values = {'user_id': self.context.user_id,
                  'project_id': self.context.project_id,
                  'host': 'foo'}
        db_inst = db.instance_create(self.context, values)
        inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
        self.assertFalse(inst.obj_attr_is_set('fault'))
        self.flags(instance_name_template='foo-%(uuid)s')
        self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
        self.assertFalse(inst.obj_attr_is_set('fault'))
def test_name_blank_if_no_id_pre_scheduling(self):
# inst.id is not set and can't be lazy loaded
inst = objects.Instance(context=self.context,
vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING)
self.assertEqual('', inst.name)
def test_name_uuid_if_no_id_post_scheduling(self):
# inst.id is not set and can't be lazy loaded
inst = objects.Instance(context=self.context,
uuid=uuids.instance,
vm_state=vm_states.ACTIVE,
task_state=None)
self.assertEqual(uuids.instance, inst.name)
    def test_from_db_object_not_overwrite_info_cache(self):
        """_from_db_object keeps the existing info_cache object instead of
        replacing it with one built from the DB row."""
        info_cache = instance_info_cache.InstanceInfoCache()
        inst = objects.Instance(context=self.context,
                                info_cache=info_cache)
        db_inst = fake_instance.fake_db_instance()
        db_inst['info_cache'] = dict(
            test_instance_info_cache.fake_info_cache)
        inst._from_db_object(self.context, inst, db_inst,
                             expected_attrs=['info_cache'])
        self.assertIs(info_cache, inst.info_cache)
    def test_from_db_object_info_cache_not_set(self):
        """A DB row without an info_cache key leaves info_cache as None."""
        inst = instance.Instance(context=self.context,
                                 info_cache=None)
        db_inst = fake_instance.fake_db_instance()
        db_inst.pop('info_cache')
        inst._from_db_object(self.context, inst, db_inst,
                             expected_attrs=['info_cache'])
        self.assertIsNone(inst.info_cache)
    def test_from_db_object_security_groups_net_set(self):
        """A DB row without a security_groups key hydrates an empty list."""
        inst = instance.Instance(context=self.context,
                                 info_cache=None)
        db_inst = fake_instance.fake_db_instance()
        db_inst.pop('security_groups')
        inst._from_db_object(self.context, inst, db_inst,
                             expected_attrs=['security_groups'])
        self.assertEqual([], inst.security_groups.objects)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                return_value=None)
    def test_from_db_object_no_extra_db_calls(self, mock_get):
        """Hydrating all extra fields from the row must not re-query the
        instance_extra table."""
        db_inst = fake_instance.fake_db_instance()
        instance.Instance._from_db_object(
            self.context, objects.Instance(), db_inst,
            expected_attrs=instance._INSTANCE_EXTRA_FIELDS)
        self.assertEqual(0, mock_get.call_count)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    def test_get_with_pci_requests(self, mock_get):
        """expected_attrs=['pci_requests'] loads pci_requests eagerly."""
        mock_get.return_value = objects.InstancePCIRequests()
        db_instance = db.instance_create(self.context, {
            'user_id': self.context.user_id,
            'project_id': self.context.project_id})
        # NOTE(review): the local name 'instance' shadows the imported
        # instance module for the remainder of this method only.
        instance = objects.Instance.get_by_uuid(
            self.context, db_instance['uuid'],
            expected_attrs=['pci_requests'])
        self.assertTrue(instance.obj_attr_is_set('pci_requests'))
        self.assertIsNotNone(instance.pci_requests)
def test_get_flavor(self):
db_flavor = flavors.get_default_flavor()
inst = objects.Instance(flavor=db_flavor)
self.assertEqual(db_flavor['flavorid'],
inst.get_flavor().flavorid)
def test_get_flavor_namespace(self):
db_flavor = flavors.get_default_flavor()
inst = objects.Instance(old_flavor=db_flavor)
self.assertEqual(db_flavor['flavorid'],
inst.get_flavor('old').flavorid)
    @mock.patch.object(db, 'instance_metadata_delete')
    def test_delete_metadata_key(self, db_delete):
        """delete_metadata_key() removes the key locally and in the DB
        without leaving the metadata field marked dirty."""
        inst = objects.Instance(context=self.context,
                                id=1, uuid=uuids.instance)
        inst.metadata = {'foo': '1', 'bar': '2'}
        inst.obj_reset_changes()
        inst.delete_metadata_key('foo')
        self.assertEqual({'bar': '2'}, inst.metadata)
        self.assertEqual({}, inst.obj_get_changes())
        db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
    def test_reset_changes(self):
        """obj_reset_changes(['metadata']) snapshots only that field's
        original value; system_metadata's snapshot stays empty."""
        inst = objects.Instance()
        inst.metadata = {'1985': 'present'}
        inst.system_metadata = {'1955': 'past'}
        self.assertEqual({}, inst._orig_metadata)
        inst.obj_reset_changes(['metadata'])
        self.assertEqual({'1985': 'present'}, inst._orig_metadata)
        self.assertEqual({}, inst._orig_system_metadata)
    def test_load_generic_calls_handler(self):
        """Touching an unset generic field routes through _load_generic."""
        inst = objects.Instance(context=self.context, uuid=uuids.instance)
        with mock.patch.object(inst, '_load_generic') as mock_load:
            def fake_load(name):
                inst.system_metadata = {}
            mock_load.side_effect = fake_load
            # Attribute access alone must trigger the lazy-load hook.
            inst.system_metadata
            mock_load.assert_called_once_with('system_metadata')
    def test_load_fault_calls_handler(self):
        """Touching an unset fault field routes through _load_fault."""
        inst = objects.Instance(context=self.context, uuid=uuids.instance)
        with mock.patch.object(inst, '_load_fault') as mock_load:
            def fake_load():
                inst.fault = None
            mock_load.side_effect = fake_load
            # Attribute access alone must trigger the lazy-load hook.
            inst.fault
            mock_load.assert_called_once_with()
    def test_load_ec2_ids_calls_handler(self):
        """Touching an unset ec2_ids field routes through _load_ec2_ids."""
        inst = objects.Instance(context=self.context, uuid=uuids.instance)
        with mock.patch.object(inst, '_load_ec2_ids') as mock_load:
            def fake_load():
                inst.ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                              ami_id='fake-ami')
            mock_load.side_effect = fake_load
            # Attribute access alone must trigger the lazy-load hook.
            inst.ec2_ids
            mock_load.assert_called_once_with()
    def test_load_migration_context(self):
        """Lazy-loading migration_context fetches it by instance uuid."""
        inst = instance.Instance(context=self.context, uuid=uuids.instance)
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid',
                return_value=test_mig_ctxt.fake_migration_context_obj
        ) as mock_get:
            # Attribute access alone must trigger the lazy load.
            inst.migration_context
            mock_get.assert_called_once_with(self.context, inst.uuid)
    def test_load_migration_context_no_context(self):
        """MigrationContextNotFound during lazy load yields None rather than
        propagating."""
        inst = instance.Instance(context=self.context, uuid=uuids.instance)
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid',
                side_effect=exception.MigrationContextNotFound(
                    instance_uuid=inst.uuid)
        ) as mock_get:
            mig_ctxt = inst.migration_context
            mock_get.assert_called_once_with(self.context, inst.uuid)
            self.assertIsNone(mig_ctxt)
    def test_load_migration_context_no_data(self):
        """_load_migration_context(db_context=None) short-circuits to None
        without hitting the database."""
        inst = instance.Instance(context=self.context, uuid=uuids.instance)
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid') as mock_get:
            loaded_ctxt = inst._load_migration_context(db_context=None)
            self.assertFalse(mock_get.called)
            self.assertIsNone(loaded_ctxt)
    def test_apply_revert_migration_context(self):
        """apply_migration_context() installs the context's typed resource
        objects; revert_migration_context() restores the original None
        values."""
        inst = instance.Instance(context=self.context, uuid=uuids.instance,
                                 numa_topology=None, pci_requests=None,
                                 pci_devices=None)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.apply_migration_context()
        attrs_type = {'numa_topology': objects.InstanceNUMATopology,
                      'pci_requests': objects.InstancePCIRequests,
                      'pci_devices': objects.PciDeviceList}
        for attr_name in instance._MIGRATION_CONTEXT_ATTRS:
            value = getattr(inst, attr_name)
            self.assertIsInstance(value, attrs_type[attr_name])
        inst.revert_migration_context()
        for attr_name in instance._MIGRATION_CONTEXT_ATTRS:
            value = getattr(inst, attr_name)
            self.assertIsNone(value)
    def test_drop_migration_context(self):
        # Dropping the context nulls the attribute and persists the removal
        # through a single instance_extra update.
        inst = instance.Instance(context=self.context, uuid=uuids.instance)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.migration_context.instance_uuid = inst.uuid
        inst.migration_context.id = 7
        with mock.patch(
                'nova.db.instance_extra_update_by_uuid') as update_extra:
            inst.drop_migration_context()
            self.assertIsNone(inst.migration_context)
            update_extra.assert_called_once_with(self.context, inst.uuid,
                                                 {"migration_context": None})
    def test_mutated_migration_context(self):
        # Inside the mutated_migration_context() manager the instance must
        # expose the migration context's 'new_*' resources; on exit the
        # original objects are restored unchanged.
        numa_topology = (test_instance_numa_topology.
                         fake_obj_numa_topology.obj_clone())
        numa_topology.cells[0].memory = 1024
        numa_topology.cells[1].memory = 1024
        pci_requests = objects.InstancePCIRequests(requests=[
            objects.InstancePCIRequest(count=1, spec=[])])
        pci_devices = pci_device.PciDeviceList()
        inst = instance.Instance(context=self.context, uuid=uuids.instance,
                                 numa_topology=numa_topology,
                                 pci_requests=pci_requests,
                                 pci_devices=pci_devices)
        expected_objs = {'numa_topology': numa_topology,
                         'pci_requests': pci_requests,
                         'pci_devices': pci_devices}
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        with inst.mutated_migration_context():
            for attr_name in instance._MIGRATION_CONTEXT_ATTRS:
                inst_value = getattr(inst, attr_name)
                migration_context_value = (
                    getattr(inst.migration_context, 'new_' + attr_name))
                self.assertIs(inst_value, migration_context_value)
        for attr_name in instance._MIGRATION_CONTEXT_ATTRS:
            inst_value = getattr(inst, attr_name)
            self.assertIs(expected_objs[attr_name], inst_value)
    def test_clear_numa_topology(self):
        # clear_numa_topology() must mark the field dirty and reset each
        # cell's id to -1 (i.e. no longer pinned to a host cell).
        numa_topology = (test_instance_numa_topology.
                         fake_obj_numa_topology.obj_clone())
        numa_topology.cells[0].id = 42
        numa_topology.cells[1].id = 43
        inst = instance.Instance(context=self.context, uuid=uuids.instance,
                                 numa_topology=numa_topology)
        inst.obj_reset_changes()
        inst.clear_numa_topology()
        self.assertIn('numa_topology', inst.obj_what_changed())
        self.assertEqual(-1, numa_topology.cells[0].id)
        self.assertEqual(-1, numa_topology.cells[1].id)
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_load_generic(self, mock_get):
inst2 = instance.Instance(metadata={'foo': 'bar'})
mock_get.return_value = inst2
inst = instance.Instance(context=self.context, uuid=uuids.instance)
inst.metadata
    @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
    def test_load_fault(self, mock_get):
        # Lazy-loading 'fault' fetches by a one-element uuid list and must
        # not mark unrelated fields (metadata) as changed.
        fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
        mock_get.return_value = {uuids.load_fault_instance: [fake_fault]}
        inst = objects.Instance(context=self.context,
                                uuid=uuids.load_fault_instance)
        fault = inst.fault
        mock_get.assert_called_once_with(self.context,
                                         [uuids.load_fault_instance])
        self.assertEqual(fake_fault['id'], fault.id)
        self.assertNotIn('metadata', inst.obj_what_changed())
    @mock.patch('nova.objects.EC2Ids.get_by_instance')
    def test_load_ec2_ids(self, mock_get):
        # Lazy-loading 'ec2_ids' delegates to EC2Ids.get_by_instance with
        # the instance object itself (not just the uuid).
        fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                      ami_id='fake-ami')
        mock_get.return_value = fake_ec2_ids
        inst = objects.Instance(context=self.context, uuid=uuids.instance)
        ec2_ids = inst.ec2_ids
        mock_get.assert_called_once_with(self.context, inst)
        self.assertEqual(fake_ec2_ids, ec2_ids)
    @mock.patch('nova.objects.SecurityGroupList.get_by_instance')
    def test_load_security_groups(self, mock_get):
        # Lazy-loading 'security_groups' delegates to
        # SecurityGroupList.get_by_instance with the instance object.
        secgroups = []
        for name in ('foo', 'bar'):
            secgroup = security_group.SecurityGroup()
            secgroup.name = name
            secgroups.append(secgroup)
        fake_secgroups = security_group.SecurityGroupList(objects=secgroups)
        mock_get.return_value = fake_secgroups
        inst = objects.Instance(context=self.context, uuid=uuids.instance)
        secgroups = inst.security_groups
        mock_get.assert_called_once_with(self.context, inst)
        self.assertEqual(fake_secgroups, secgroups)
    @mock.patch('nova.objects.PciDeviceList.get_by_instance_uuid')
    def test_load_pci_devices(self, mock_get):
        # Lazy-loading 'pci_devices' fetches by instance uuid.
        fake_pci_devices = pci_device.PciDeviceList()
        mock_get.return_value = fake_pci_devices
        inst = objects.Instance(context=self.context, uuid=uuids.pci_devices)
        pci_devices = inst.pci_devices
        mock_get.assert_called_once_with(self.context, uuids.pci_devices)
        self.assertEqual(fake_pci_devices, pci_devices)
    def test_get_with_extras(self):
        # 'pci_requests' lives in the instance_extra table: it must only be
        # populated when explicitly requested via expected_attrs.
        pci_requests = objects.InstancePCIRequests(requests=[
            objects.InstancePCIRequest(count=123, spec=[])])
        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                project_id=self.context.project_id,
                                pci_requests=pci_requests)
        inst.create()
        uuid = inst.uuid
        inst = objects.Instance.get_by_uuid(self.context, uuid)
        self.assertFalse(inst.obj_attr_is_set('pci_requests'))
        inst = objects.Instance.get_by_uuid(
            self.context, uuid, expected_attrs=['pci_requests'])
        self.assertTrue(inst.obj_attr_is_set('pci_requests'))
def test_obj_clone(self):
# Make sure clone shows no changes when no metadata is set
inst1 = objects.Instance(uuid=uuids.instance)
inst1.obj_reset_changes()
inst1 = inst1.obj_clone()
self.assertEqual(len(inst1.obj_what_changed()), 0)
# Make sure clone shows no changes when metadata is set
inst1 = objects.Instance(uuid=uuids.instance)
inst1.metadata = dict(key1='val1')
inst1.system_metadata = dict(key1='val1')
inst1.obj_reset_changes()
inst1 = inst1.obj_clone()
self.assertEqual(len(inst1.obj_what_changed()), 0)
class TestInstanceObject(test_objects._LocalTest,
                         _TestInstanceObject):
    """Runs the shared Instance tests against the local (in-process) DB."""

    def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
                                                   expected_exception):
        # For every field with a dedicated _save_<field>() method, force a
        # DBReferenceError on the given foreign key and check that save()
        # surfaces the expected exception type.
        # NOTE(danms): Do this here and not in the remote test because
        # we're mocking out obj_attr_is_set() without the thing actually
        # being set, which confuses the heck out of the serialization
        # stuff.
        error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
                                        'key_table')
        # Prevent lazy-loading any fields, results in InstanceNotFound
        # NOTE: the local name 'instance' intentionally shadows the
        # imported instance module for the rest of this method.
        attrs = objects.instance.INSTANCE_OPTIONAL_ATTRS
        instance = fake_instance.fake_instance_obj(self.context,
                                                   expected_attrs=attrs)
        fields_with_save_methods = [field for field in instance.fields
                                    if hasattr(instance, '_save_%s' % field)]
        for field in fields_with_save_methods:
            # Patch per-field inside the loop so each iteration gets fresh
            # mocks; _test is defined and called immediately.
            @mock.patch.object(instance, '_save_%s' % field)
            @mock.patch.object(instance, 'obj_attr_is_set')
            def _test(mock_is_set, mock_save_field):
                mock_is_set.return_value = True
                mock_save_field.side_effect = error
                instance.obj_reset_changes(fields=[field])
                instance._changed_fields.add(field)
                self.assertRaises(expected_exception, instance.save)
                instance.obj_reset_changes(fields=[field])
            _test()

    def test_save_objectfield_missing_instance_row(self):
        # FK failures on instance_uuid mean the instance row is gone.
        self._test_save_objectfield_fk_constraint_fails(
            'instance_uuid', exception.InstanceNotFound)

    def test_save_objectfield_reraises_if_not_instance_related(self):
        # Any other FK failure is re-raised unchanged.
        self._test_save_objectfield_fk_constraint_fails(
            'other_foreign_key', db_exc.DBReferenceError)
class TestRemoteInstanceObject(test_objects._RemoteTest,
                               _TestInstanceObject):
    """Runs the shared Instance tests through the remote (RPC-serialized)
    object path; adds no behavior of its own."""
    pass
class _TestInstanceListObject(object):
    """InstanceList tests shared by the local and remote test variants."""

    def fake_instance(self, id, updates=None):
        """Build a fake instance DB record with the given id.

        'updates' optionally overrides arbitrary columns of the record.
        """
        # FIX: the original hard-coded id=2 and silently ignored the 'id'
        # parameter every caller passes.
        db_inst = fake_instance.fake_db_instance(id=id,
                                                 access_ip_v4='1.2.3.4',
                                                 access_ip_v6='::1')
        db_inst['terminated_at'] = None
        db_inst['deleted_at'] = None
        db_inst['created_at'] = None
        db_inst['updated_at'] = None
        db_inst['launched_at'] = datetime.datetime(1955, 11, 12,
                                                   22, 4, 0)
        db_inst['security_groups'] = []
        db_inst['deleted'] = 0
        db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
                                     instance_uuid=db_inst['uuid'])
        if updates:
            db_inst.update(updates)
        return db_inst

    @mock.patch.object(db, 'instance_get_all_by_filters')
    def test_get_all_by_filters(self, mock_get_all):
        # Single sort key/direction routes through the non-sorted DB API.
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        mock_get_all.return_value = fakes
        inst_list = objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, 'uuid', 'asc',
            expected_attrs=['metadata'], use_slave=False)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
        mock_get_all.assert_called_once_with(self.context, {'foo': 'bar'},
                                             'uuid', 'asc', limit=None,
                                             marker=None,
                                             columns_to_join=['metadata'])

    @mock.patch.object(db, 'instance_get_all_by_filters_sort')
    def test_get_all_by_filters_sorted(self, mock_get_all):
        # Multiple sort keys/directions route through the sorted DB API.
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        mock_get_all.return_value = fakes
        inst_list = objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, expected_attrs=['metadata'],
            use_slave=False, sort_keys=['uuid'], sort_dirs=['asc'])
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
        mock_get_all.assert_called_once_with(self.context, {'foo': 'bar'},
                                             limit=None, marker=None,
                                             columns_to_join=['metadata'],
                                             sort_keys=['uuid'],
                                             sort_dirs=['asc'])

    @mock.patch.object(db, 'instance_get_all_by_filters_sort')
    @mock.patch.object(db, 'instance_get_all_by_filters')
    def test_get_all_by_filters_calls_non_sort(self,
                                               mock_get_by_filters,
                                               mock_get_by_filters_sort):
        '''Verifies InstanceList.get_by_filters calls correct DB function.'''
        # Single sort key/direction is set, call non-sorted DB function
        objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, sort_key='key', sort_dir='dir',
            limit=100, marker='uuid', use_slave=True)
        mock_get_by_filters.assert_called_once_with(
            self.context, {'foo': 'bar'}, 'key', 'dir', limit=100,
            marker='uuid', columns_to_join=None)
        self.assertEqual(0, mock_get_by_filters_sort.call_count)

    @mock.patch.object(db, 'instance_get_all_by_filters_sort')
    @mock.patch.object(db, 'instance_get_all_by_filters')
    def test_get_all_by_filters_calls_sort(self,
                                           mock_get_by_filters,
                                           mock_get_by_filters_sort):
        '''Verifies InstanceList.get_by_filters calls correct DB function.'''
        # Multiple sort keys/directions are set, call sorted DB function
        objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, limit=100, marker='uuid',
            use_slave=True, sort_keys=['key1', 'key2'],
            sort_dirs=['dir1', 'dir2'])
        mock_get_by_filters_sort.assert_called_once_with(
            self.context, {'foo': 'bar'}, limit=100,
            marker='uuid', columns_to_join=None,
            sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2'])
        self.assertEqual(0, mock_get_by_filters.call_count)

    @mock.patch.object(db, 'instance_get_all_by_filters')
    def test_get_all_by_filters_works_for_cleaned(self, mock_get_all):
        # 'cleaned' filtering must work together with read_deleted='yes'.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2, updates={'deleted': 2,
                                                'cleaned': None})]
        self.context.read_deleted = 'yes'
        mock_get_all.return_value = [fakes[1]]
        inst_list = objects.InstanceList.get_by_filters(
            self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
            expected_attrs=['metadata'], use_slave=False)
        self.assertEqual(1, len(inst_list))
        self.assertIsInstance(inst_list.objects[0], instance.Instance)
        self.assertEqual(fakes[1]['uuid'], inst_list.objects[0].uuid)
        mock_get_all.assert_called_once_with(
            self.context,
            {'deleted': True, 'cleaned': False},
            'uuid', 'asc',
            limit=None, marker=None,
            columns_to_join=['metadata'])

    @mock.patch.object(db, 'instance_get_all_by_host')
    def test_get_by_host(self, mock_get_all):
        # Each hydrated object keeps the calling context and no fields are
        # left marked as changed.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        mock_get_all.return_value = fakes
        inst_list = objects.InstanceList.get_by_host(self.context, 'foo')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
            self.assertEqual(self.context, inst_list.objects[i]._context)
        self.assertEqual(set(), inst_list.obj_what_changed())
        mock_get_all.assert_called_once_with(self.context, 'foo',
                                             columns_to_join=None)

    @mock.patch.object(db, 'instance_get_all_by_host_and_node')
    def test_get_by_host_and_node(self, mock_get_all):
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        mock_get_all.return_value = fakes
        inst_list = objects.InstanceList.get_by_host_and_node(self.context,
                                                              'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
        mock_get_all.assert_called_once_with(self.context, 'foo', 'bar',
                                             columns_to_join=None)

    @mock.patch.object(db, 'instance_get_all_by_host_and_not_type')
    def test_get_by_host_and_not_type(self, mock_get_all):
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        mock_get_all.return_value = fakes
        inst_list = objects.InstanceList.get_by_host_and_not_type(
            self.context, 'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
        mock_get_all.assert_called_once_with(self.context, 'foo',
                                             type_id='bar')

    @mock.patch('nova.objects.instance._expected_cols')
    @mock.patch('nova.db.instance_get_all')
    def test_get_all(self, mock_get_all, mock_exp):
        # expected_attrs is translated via _expected_cols before hitting
        # the DB layer.
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        mock_get_all.return_value = fakes
        mock_exp.return_value = mock.sentinel.exp_att
        inst_list = objects.InstanceList.get_all(
            self.context, expected_attrs='fake')
        mock_get_all.assert_called_once_with(
            self.context, columns_to_join=mock.sentinel.exp_att)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)

    @mock.patch.object(db, 'instance_get_all_hung_in_rebooting')
    def test_get_hung_in_rebooting(self, mock_get_all):
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        dt = utils.isotime()
        mock_get_all.return_value = fakes
        inst_list = objects.InstanceList.get_hung_in_rebooting(self.context,
                                                               dt)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
        mock_get_all.assert_called_once_with(self.context, dt)

    def test_get_active_by_window_joined(self):
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        # NOTE(mriedem): Send in a timezone-naive datetime since the
        # InstanceList.get_active_by_window_joined method should convert it
        # to tz-aware for the DB API call, which we'll assert with our stub.
        dt = timeutils.utcnow()

        def fake_instance_get_active_by_window_joined(context, begin, end,
                                                      project_id, host,
                                                      columns_to_join,
                                                      limit=None, marker=None):
            # make sure begin is tz-aware
            self.assertIsNotNone(begin.utcoffset())
            self.assertIsNone(end)
            self.assertEqual(['metadata'], columns_to_join)
            return fakes

        with mock.patch.object(db, 'instance_get_active_by_window_joined',
                               fake_instance_get_active_by_window_joined):
            inst_list = objects.InstanceList.get_active_by_window_joined(
                self.context, dt, expected_attrs=['metadata'])
        for fake, obj in zip(fakes, inst_list.objects):
            self.assertIsInstance(obj, instance.Instance)
            self.assertEqual(fake['uuid'], obj.uuid)

    @mock.patch.object(db, 'instance_fault_get_by_instance_uuids')
    @mock.patch.object(db, 'instance_get_all_by_host')
    def test_with_fault(self, mock_get_all, mock_fault_get):
        # expected_attrs=['fault'] triggers a bulk fault lookup; instances
        # with no fault record get fault=None.
        fake_insts = [
            fake_instance.fake_db_instance(uuid=uuids.faults_instance,
                                           host='host'),
            fake_instance.fake_db_instance(uuid=uuids.faults_instance_nonexist,
                                           host='host'),
            ]
        fake_faults = test_instance_fault.fake_faults
        mock_get_all.return_value = fake_insts
        mock_fault_get.return_value = fake_faults
        instances = objects.InstanceList.get_by_host(self.context, 'host',
                                                     expected_attrs=['fault'],
                                                     use_slave=False)
        self.assertEqual(2, len(instances))
        self.assertEqual(fake_faults['fake-uuid'][0],
                         dict(instances[0].fault))
        self.assertIsNone(instances[1].fault)
        mock_get_all.assert_called_once_with(self.context, 'host',
                                             columns_to_join=[])
        mock_fault_get.assert_called_once_with(self.context,
                                               [x['uuid'] for x in fake_insts])

    @mock.patch.object(db, 'instance_fault_get_by_instance_uuids')
    def test_fill_faults(self, mock_fault_get):
        # fill_faults() returns the uuids that had faults, attaches the
        # latest fault to each instance and leaves nothing marked changed.
        inst1 = objects.Instance(uuid=uuids.db_fault_1)
        inst2 = objects.Instance(uuid=uuids.db_fault_2)
        insts = [inst1, inst2]
        for inst in insts:
            inst.obj_reset_changes()
        db_faults = {
            'uuid1': [{'id': 123,
                       'instance_uuid': uuids.db_fault_1,
                       'code': 456,
                       'message': 'Fake message',
                       'details': 'No details',
                       'host': 'foo',
                       'deleted': False,
                       'deleted_at': None,
                       'updated_at': None,
                       'created_at': None,
                       }
                      ]}
        mock_fault_get.return_value = db_faults
        inst_list = objects.InstanceList()
        inst_list._context = self.context
        inst_list.objects = insts
        faulty = inst_list.fill_faults()
        self.assertEqual([uuids.db_fault_1], list(faulty))
        self.assertEqual(db_faults['uuid1'][0]['message'],
                         inst_list[0].fault.message)
        self.assertIsNone(inst_list[1].fault)
        for inst in inst_list:
            self.assertEqual(set(), inst.obj_what_changed())
        mock_fault_get.assert_called_once_with(self.context,
                                               [x.uuid for x in insts],
                                               latest=True)

    @mock.patch('nova.objects.instance.Instance.obj_make_compatible')
    def test_get_by_security_group(self, mock_compat):
        fake_secgroup = dict(test_security_group.fake_secgroup)
        fake_secgroup['instances'] = [
            fake_instance.fake_db_instance(id=1,
                                           system_metadata={'foo': 'bar'}),
            fake_instance.fake_db_instance(id=2),
            ]
        with mock.patch.object(db, 'security_group_get') as sgg:
            sgg.return_value = fake_secgroup
            secgroup = security_group.SecurityGroup()
            secgroup.id = fake_secgroup['id']
            instances = instance.InstanceList.get_by_security_group(
                self.context, secgroup)
        self.assertEqual(2, len(instances))
        self.assertEqual([1, 2], [x.id for x in instances])
        self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
        self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)

    def test_get_by_security_group_after_destroy(self):
        # Destroying an instance must release its security-group
        # association so the group is no longer reported as in use.
        db_sg = db.security_group_create(
            self.context,
            {'name': 'foo',
             'description': 'test group',
             'user_id': self.context.user_id,
             'project_id': self.context.project_id})
        self.assertFalse(db.security_group_in_use(self.context, db_sg.id))
        inst = objects.Instance(
            context=self.context,
            user_id=self.context.user_id,
            project_id=self.context.project_id)
        inst.create()
        db.instance_add_security_group(self.context,
                                       inst.uuid,
                                       db_sg.id)
        self.assertTrue(db.security_group_in_use(self.context, db_sg.id))
        inst.destroy()
        self.assertFalse(db.security_group_in_use(self.context, db_sg.id))

    def test_get_by_grantee_security_group_ids(self):
        fake_instances = [
            fake_instance.fake_db_instance(id=1),
            fake_instance.fake_db_instance(id=2)
            ]
        with mock.patch.object(
                db, 'instance_get_all_by_grantee_security_groups') as igabgsg:
            igabgsg.return_value = fake_instances
            secgroup_ids = [1]
            instances = objects.InstanceList.get_by_grantee_security_group_ids(
                self.context, secgroup_ids)
            igabgsg.assert_called_once_with(self.context, secgroup_ids)
        self.assertEqual(2, len(instances))
        self.assertEqual([1, 2], [x.id for x in instances])

    @mock.patch('nova.db.instance_get_all_by_host')
    def test_get_uuids_by_host(self, mock_get_all):
        # get_uuids_by_host should join no extra columns at all.
        fake_instances = [
            fake_instance.fake_db_instance(id=1),
            fake_instance.fake_db_instance(id=2),
            ]
        mock_get_all.return_value = fake_instances
        expected_uuids = [inst['uuid'] for inst in fake_instances]
        actual_uuids = objects.InstanceList.get_uuids_by_host(
            self.context, 'b')
        self.assertEqual(expected_uuids, actual_uuids)
        mock_get_all.assert_called_once_with(self.context, 'b',
                                             columns_to_join=[])
class TestInstanceListObject(test_objects._LocalTest,
                             _TestInstanceListObject):
    """Runs the shared InstanceList tests against the local DB path."""
    pass
class TestRemoteInstanceListObject(test_objects._RemoteTest,
                                   _TestInstanceListObject):
    """Runs the shared InstanceList tests through the remote object path."""
    pass
class TestInstanceObjectMisc(test.TestCase):
    """Tests for module-level helpers in nova.objects.instance."""

    def test_expected_cols(self):
        # Only attrs present in _INSTANCE_OPTIONAL_JOINED_FIELDS survive;
        # None passes through unchanged.
        self.stub_out('nova.objects.instance._INSTANCE_OPTIONAL_JOINED_FIELDS',
                      ['bar'])
        self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
        self.assertIsNone(instance._expected_cols(None))

    def test_expected_cols_extra(self):
        # Attrs stored in instance_extra are rewritten to 'extra' plus an
        # 'extra.<attr>' column.
        self.assertEqual(['metadata', 'extra', 'extra.numa_topology'],
                         instance._expected_cols(['metadata',
                                                  'numa_topology']))

    def test_expected_cols_no_duplicates(self):
        # Duplicate attrs in the request must be collapsed in the result.
        expected_attr = ['metadata', 'system_metadata', 'info_cache',
                         'security_groups', 'info_cache', 'metadata',
                         'pci_devices', 'tags', 'extra', 'flavor']

        result_list = instance._expected_cols(expected_attr)

        self.assertEqual(len(result_list), len(set(expected_attr)))
        self.assertEqual(['metadata', 'system_metadata', 'info_cache',
                          'security_groups', 'pci_devices', 'tags', 'extra',
                          'extra.flavor'], result_list)

    def test_migrate_instance_keypairs(self):
        # migrate_instance_keypairs() should report 3 candidate rows (the
        # two live instances missing keypairs plus the orphan) and migrate
        # the 2 live ones; a missing keypair migrates to an empty list.
        ctxt = context.RequestContext('foo', 'bar')
        key = objects.KeyPair(context=ctxt,
                              user_id=ctxt.user_id,
                              name='testkey',
                              public_key='keydata',
                              type='ssh')
        key.create()
        inst1 = objects.Instance(context=ctxt,
                                 user_id=ctxt.user_id,
                                 project_id=ctxt.project_id,
                                 key_name='testkey')
        inst1.create()
        inst2 = objects.Instance(context=ctxt,
                                 user_id=ctxt.user_id,
                                 project_id=ctxt.project_id,
                                 key_name='testkey',
                                 keypairs=objects.KeyPairList(
                                     objects=[key]))
        inst2.create()
        inst3 = objects.Instance(context=ctxt,
                                 user_id=ctxt.user_id,
                                 project_id=ctxt.project_id,
                                 key_name='missingkey')
        inst3.create()
        inst4 = objects.Instance(context=ctxt,
                                 user_id=ctxt.user_id,
                                 project_id=ctxt.project_id,
                                 key_name='missingkey')
        inst4.create()
        inst4.destroy()
        # NOTE(danms): Add an orphaned instance_extra record for
        # a totally invalid instance to make sure we don't explode.
        # See bug 1684861 for more information.
        db.instance_extra_update_by_uuid(ctxt, 'foo', {})
        hit, done = instance.migrate_instance_keypairs(ctxt, 10)
        self.assertEqual(3, hit)
        self.assertEqual(2, done)
        db_extra = db.instance_extra_get_by_instance_uuid(
            ctxt, inst1.uuid, ['keypairs'])
        self.assertIsNotNone(db_extra.keypairs)
        db_extra = db.instance_extra_get_by_instance_uuid(
            ctxt, inst3.uuid, ['keypairs'])
        obj = base.NovaObject.obj_from_primitive(
            jsonutils.loads(db_extra['keypairs']))
        self.assertEqual([], obj.objects)
| |
import re
import os
import six
class Compiler(object):
RE_INTERPOLATE = re.compile(r'(\\)?([#!]){(.*?)}')
doctypes = {
'5': '<!DOCTYPE html>'
, 'xml': '<?xml version="1.0" encoding="utf-8" ?>'
, 'default': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
, 'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">'
, '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
, 'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
, 'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">'
}
inlineTags = [
'a'
, 'abbr'
, 'acronym'
, 'b'
, 'br'
, 'code'
, 'em'
, 'font'
, 'i'
, 'img'
, 'ins'
, 'kbd'
, 'map'
, 'samp'
, 'small'
, 'span'
, 'strong'
, 'sub'
, 'sup'
, 'textarea'
]
selfClosing = [
'meta'
, 'img'
, 'link'
, 'input'
, 'area'
, 'base'
, 'col'
, 'br'
, 'hr'
]
autocloseCode = 'if,for,block,filter,autoescape,with,trans,spaceless,comment,cache,macro,localize,compress,raw'.split(',')
filters = {}
def __init__(self, node, **options):
self.options = options
self.node = node
self.hasCompiledDoctype = False
self.hasCompiledTag = False
self.pp = options.get('pretty', True)
self.debug = options.get('compileDebug', False) is not False
self.filters.update(options.get('filters', {}))
self.doctypes.update(options.get('doctypes', {}))
# self.var_processor = options.get('var_processor', lambda x: x)
self.selfClosing.extend(options.get('selfClosing', []))
self.autocloseCode.extend(options.get('autocloseCode', []))
self.inlineTags.extend(options.get('inlineTags', []))
self.useRuntime = options.get('useRuntime', True)
self.extension = options.get('extension', None) or '.jade'
self.indents = 0
self.doctype = None
self.terse = False
self.xml = False
self.mixing = 0
self.block_start_string = options.get("block_start_string", "{%")
self.block_end_string = options.get("block_end_string", "%}")
self.variable_start_string = options.get("variable_start_string", "{{")
self.variable_end_string = options.get("variable_end_string", "}}")
if 'doctype' in self.options: self.setDoctype(options['doctype'])
self.instring = False
def var_processor(self, var):
if isinstance(var,six.string_types) and var.startswith('_ '):
var = '_("%s")'%var[2:]
return var
def compile_top(self):
return ''
def compile(self):
self.buf = [self.compile_top()]
self.lastBufferedIdx = -1
self.visit(self.node)
compiled = u''.join(self.buf)
if isinstance(compiled, six.binary_type):
compiled = six.text_type(compiled, 'utf8')
return compiled
def setDoctype(self, name):
self.doctype = self.doctypes.get(name or 'default',
'<!DOCTYPE %s>' % name)
self.terse = name in ['5','html']
self.xml = self.doctype.startswith('<?xml')
def buffer(self, str):
if self.lastBufferedIdx == len(self.buf):
self.lastBuffered += str
self.buf[self.lastBufferedIdx - 1] = self.lastBuffered
else:
self.buf.append(str)
self.lastBuffered = str;
self.lastBufferedIdx = len(self.buf)
def variable(self, str):
return "%s%s%s" % (self.variable_start_string, str, self.variable_end_string)
def tag(self, str):
return"%s %s %s" % (self.block_start_string, str, self.block_end_string)
def visit(self, node, *args, **kwargs):
# debug = self.debug
# if debug:
# self.buf.append('__jade.unshift({ lineno: %d, filename: %s });' % (node.line,('"%s"'%node.filename) if node.filename else '__jade[0].filename'));
# if node.debug==False and self.debug:
# self.buf.pop()
# self.buf.pop()
self.visitNode(node, *args, **kwargs)
# if debug: self.buf.append('__jade.shift();')
def visitNode (self, node, *args, **kwargs):
name = node.__class__.__name__
if self.instring and name != 'Tag':
self.buffer('\n')
self.instring = False
return getattr(self, 'visit%s' % name)(node, *args, **kwargs)
def visitLiteral(self, node):
self.buffer(node.str)
def visitBlock(self, block):
for node in block.nodes:
self.visit(node)
def visitCodeBlock(self, block):
self.buffer(self.tag('block ' + block.name))
if block.mode=='prepend':
self.buffer(self.variable('super()'))
self.visitBlock(block)
if block.mode == 'append':
self.buffer(self.variable('super()'))
self.buffer(self.tag('endblock'))
def visitDoctype(self,doctype=None):
if doctype and (doctype.val or not self.doctype):
self.setDoctype(doctype.val or 'default')
if self.doctype:
self.buffer(self.doctype)
self.hasCompiledDoctype = True
def visitMixin(self,mixin):
if mixin.block:
self.buffer(self.tag('macro %s(%s)' % (mixin.name, mixin.args)))
self.visitBlock(mixin.block)
self.buffer(self.tag('endmacro'))
else:
self.buffer(self.variable('%s(%s)' % (mixin.name, mixin.args)))
def visitTag(self,tag):
self.indents += 1
name = tag.name
if not self.hasCompiledTag:
if not self.hasCompiledDoctype and 'html' == name:
self.visitDoctype()
self.hasCompiledTag = True
if self.pp and name not in self.inlineTags and not tag.inline:
self.buffer('\n' + ' ' * (self.indents - 1))
if name in self.inlineTags or tag.inline:
self.instring = False
closed = name in self.selfClosing and not self.xml
if tag.text:
t = tag.text.nodes[0]
if t.startswith(u'/'):
if len(t) > 1:
raise Exception('%s is self closing and should not have content.' % name)
closed = True
self.buffer('<%s' % name)
self.visitAttributes(tag.attrs)
self.buffer('/>' if not self.terse and closed else '>')
if not closed:
if tag.code: self.visitCode(tag.code)
if tag.text: self.buffer(self.interpolate(tag.text.nodes[0].lstrip()))
self.escape = 'pre' == tag.name
# empirically check if we only contain text
textOnly = tag.textOnly or not bool(len(tag.block.nodes))
self.instring = False
self.visit(tag.block)
if self.pp and not name in self.inlineTags and not textOnly:
self.buffer('\n' + ' ' * (self.indents-1))
self.buffer('</%s>' % name)
self.indents -= 1
def visitFilter(self,filter):
if filter.name not in self.filters:
if filter.isASTFilter:
raise Exception('unknown ast filter "%s"' % filter.name)
else:
raise Exception('unknown filter "%s"' % filter.name)
fn = self.filters.get(filter.name)
if filter.isASTFilter:
self.buf.append(fn(filter.block, self, filter.attrs))
else:
text = ''.join(filter.block.nodes)
text = self.interpolate(text)
filter.attrs = filter.attrs or {}
filter.attrs['filename'] = self.options.get('filename', None)
self.buffer(fn(text, filter.attrs))
def _interpolate(self, attr, repl):
return self.RE_INTERPOLATE.sub(lambda matchobj:repl(matchobj.group(3)),
attr)
def interpolate(self, text, escape=None):
def repl(matchobj):
if escape is None:
if matchobj.group(2) == '!':
filter_string = ''
else:
filter_string = '|escape'
elif escape is True:
filter_string = '|escape'
elif escape is False:
filter_string = ''
return self.variable(matchobj.group(3) + filter_string)
return self.RE_INTERPOLATE.sub(repl, text)
def visitText(self,text):
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
if self.pp:
self.buffer('\n')
def visitString(self,text):
instring = not text.inline
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
self.instring = instring
def visitComment(self,comment):
if not comment.buffer: return
if self.pp:
self.buffer('\n' + ' ' * (self.indents))
self.buffer('<!--%s-->' % comment.val)
def visitAssignment(self,assignment):
self.buffer(self.tag('set %s = %s' % (assignment.name, assignment.val)))
def format_path(self,path):
has_extension = '.' in os.path.basename(path)
if not has_extension:
path += self.extension
return path
def visitExtends(self,node):
path = self.format_path(node.path)
self.buffer(self.tag('extends "%s"' % path))
def visitInclude(self,node):
path = self.format_path(node.path)
self.buffer(self.tag('include "%s"' % path))
def visitBlockComment(self, comment):
if not comment.buffer:
return
isConditional = comment.val.strip().startswith('if')
self.buffer('<!--[%s]>' % comment.val.strip() if isConditional else '<!--%s' % comment.val)
self.visit(comment.block)
self.buffer('<![endif]-->' if isConditional else '-->')
def visitConditional(self, conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append(self.tag(TYPE_CODE[conditional.type](conditional.sentence)))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']:
self.buf.append(self.tag('endif'))
def visitVar(self, var, escape=False):
var = self.var_processor(var)
return self.variable(var + ('|escape' if escape else ''))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append(self.visitVar(val, code.escape))
else:
self.buf.append(self.tag(code.val))
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ', 1)[0]
if codeTag in self.autocloseCode:
self.buf.append(self.tag('end' + codeTag))
def visitEach(self,each):
self.buf.append(self.tag('for %s in %s|__pyjade_iter:%d' % (','.join(each.keys), each.obj, len(each.keys))))
self.visit(each.block)
self.buf.append(self.tag('endfor'))
def attributes(self,attrs):
return self.variable("__pyjade_attrs(%s)" % attrs)
def visitDynamicAttributes(self, attrs):
buf, classes, params = [], [], {}
terse='terse=True' if self.terse else ''
for attr in attrs:
if attr['name'] == 'class':
classes.append('(%s)' % attr['val'])
else:
pair = "('%s',(%s))" % (attr['name'], attr['val'])
buf.append(pair)
if classes:
classes = " , ".join(classes)
buf.append("('class', (%s))" % classes)
buf = ', '.join(buf)
if self.terse: params['terse'] = 'True'
if buf: params['attrs'] = '[%s]' % buf
param_string = ', '.join(['%s=%s' % (n, v) for n, v in six.iteritems(params)])
if buf or terse:
self.buf.append(self.attributes(param_string))
def visitAttributes(self, attrs):
temp_attrs = []
for attr in attrs:
if (not self.useRuntime and not attr['name']=='class') or attr['static']: #
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
n, v = attr['name'], attr['val']
if isinstance(v, six.string_types):
if self.useRuntime or attr['static']:
self.buf.append(' %s=%s' % (n, v))
else:
self.buf.append(' %s="%s"' % (n, self.visitVar(v)))
elif v is True:
if self.terse:
self.buf.append(' %s' % (n,))
else:
self.buf.append(' %s="%s"' % (n, n))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
@classmethod
def register_filter(cls, name, f):
    """Expose callable *f* as the template filter called *name*."""
    cls.filters[name] = f
@classmethod
def register_autoclosecode(cls, name):
    """Mark code keyword *name* as needing an automatic end-tag."""
    cls.autocloseCode.append(name)
#1-
| |
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import common
class EdifyGenerator(object):
    """Class to generate scripts in the 'edify' recovery script language
    used from donut onwards."""

    def __init__(self, version, info, fstab=None):
        # script: accumulated edify statements, in emission order.
        # mounts: mount points this script has mounted and not yet unmounted.
        self.script = []
        self.mounts = set()
        self.version = version
        self.info = info
        if fstab is None:
            self.fstab = self.info.get("fstab", None)
        else:
            self.fstab = fstab

    def MakeTemporary(self):
        """Make a temporary script object whose commands can later be
        appended to the parent script with AppendScript(). Used when the
        caller wants to generate script commands out-of-order."""
        x = EdifyGenerator(self.version, self.info)
        x.mounts = self.mounts  # deliberately shared with the parent
        return x

    @staticmethod
    def WordWrap(cmd, linelen=80):
        """'cmd' should be a function call with null characters after each
        parameter (eg, "somefun(foo,\0bar,\0baz)"). This function wraps cmd
        to a given line length, replacing nulls with spaces and/or newlines
        to format it nicely."""
        indent = cmd.index("(")+1
        out = []
        first = True
        x = re.compile("^(.{,%d})\0" % (linelen-indent,))
        while True:
            if not first:
                out.append(" " * indent)
            first = False
            m = x.search(cmd)
            if not m:
                parts = cmd.split("\0", 1)
                out.append(parts[0]+"\n")
                if len(parts) == 1:
                    break
                else:
                    cmd = parts[1]
                    continue
            out.append(m.group(1)+"\n")
            cmd = cmd[m.end():]
        return "".join(out).replace("\0", " ").rstrip("\n")

    def AppendScript(self, other):
        """Append the contents of another script (which should be created
        with temporary=True) to this one."""
        self.script.extend(other.script)

    def AssertOemProperty(self, name, value):
        """Assert that a property on the OEM partition matches a value."""
        if not name:
            raise ValueError("must specify an OEM property")
        if not value:
            raise ValueError("must specify the OEM value")
        cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || '
               'abort("This package expects the value \\"{value}\\" for '
               '\\"{name}\\" on the OEM partition; this has value \\"" + '
               'file_getprop("/oem/oem.prop", "{name}") + "\\".");').format(
                   name=name, value=value)
        self.script.append(cmd)

    def AssertSomeFingerprint(self, *fp):
        """Assert that the current recovery build fingerprint is one of *fp."""
        if not fp:
            raise ValueError("must specify some fingerprints")
        cmd = (' ||\n '.join([('getprop("ro.build.fingerprint") == "%s"') % i
                              for i in fp]) +
               ' ||\n abort("Package expects build fingerprint of %s; this '
               'device has " + getprop("ro.build.fingerprint") + ".");') % (
                   " or ".join(fp))
        self.script.append(cmd)

    def AssertSomeThumbprint(self, *fp):
        """Assert that the current recovery build thumbprint is one of *fp."""
        if not fp:
            raise ValueError("must specify some thumbprints")
        cmd = (' ||\n '.join([('getprop("ro.build.thumbprint") == "%s"') % i
                              for i in fp]) +
               ' ||\n abort("Package expects build thumbprint of %s; this '
               'device has " + getprop("ro.build.thumbprint") + ".");') % (
                   " or ".join(fp))
        self.script.append(cmd)

    def AssertOlderBuild(self, timestamp, timestamp_text):
        """Assert that the build on the device is older (or the same as)
        the given timestamp."""
        self.script.append(
            ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
             'abort("Can\'t install this package (%s) over newer '
             'build (" + getprop("ro.build.date") + ").");') % (timestamp,
                                                                timestamp_text))

    def AssertDevice(self, device):
        """Assert that the device identifier is the given string."""
        cmd = ('assert(' +
               ' || '.join(['getprop("ro.product.device") == "%s" || getprop("ro.build.product") == "%s"'
                            % (i, i) for i in device.split(",")]) +
               ' || abort("This package is for device: %s; ' +
               'this device is " + getprop("ro.product.device") + ".");' +
               ');') % device
        self.script.append(cmd)

    def AssertSomeBootloader(self, *bootloaders):
        """Assert that the bootloader version is one of *bootloaders."""
        cmd = ("assert(" +
               " || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
                            for b in bootloaders]) +
               ' || abort("This package supports bootloader(s): ' +
               ", ".join(["%s" % (b,) for b in bootloaders]) +
               '; this device has bootloader " + getprop("ro.bootloader") + ".");' +
               ");")
        self.script.append(self.WordWrap(cmd))

    def AssertSomeBaseband(self, *basebands):
        """Assert that the baseband version is one of *basebands."""
        cmd = ("assert(" +
               " || ".join(['getprop("ro.baseband") == "%s"' % (b,)
                            for b in basebands]) +
               ' || abort("This package supports baseband(s): ' +
               ", ".join(["%s" % (b,) for b in basebands]) +
               '; this device has baseband " + getprop("ro.baseband") + ".");' +
               ");")
        # BUG FIX: was self._WordWrap(cmd); the method is the static
        # WordWrap, so the old call raised AttributeError at runtime.
        self.script.append(self.WordWrap(cmd))

    def RunBackup(self, command):
        """Invoke backuptool.sh with the given command ('backup'/'restore')."""
        self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command))

    def FlashSuperSU(self):
        """Extract the bundled SuperSU package and run its update-binary."""
        self.script.append('package_extract_dir("supersu", "/tmp/supersu");')
        self.script.append('run_program("/sbin/busybox", "unzip", "/tmp/supersu/supersu.zip", "META-INF/com/google/android/*", "-d", "/tmp/supersu");')
        self.script.append('run_program("/sbin/busybox", "sh", "/tmp/supersu/META-INF/com/google/android/update-binary", "dummy", "1", "/tmp/supersu/supersu.zip");')

    def ValidateSignatures(self, command):
        """Abort the install when the release key check fails."""
        self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");')
        # Exit code 124 == abort. run_program returns raw, so left-shift 8bit
        self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");')

    def ShowProgress(self, frac, dur):
        """Update the progress bar, advancing it over 'frac' over the next
        'dur' seconds. 'dur' may be zero to advance it via SetProgress
        commands instead of by time."""
        self.script.append("show_progress(%f, %d);" % (frac, int(dur)))

    def SetProgress(self, frac):
        """Set the position of the progress bar within the chunk defined
        by the most recent ShowProgress call. 'frac' should be in
        [0,1]."""
        self.script.append("set_progress(%f);" % (frac,))

    def PatchCheck(self, filename, *sha1):
        """Check that the given file (or MTD reference) has one of the
        given *sha1 hashes, checking the version saved in cache if the
        file does not match."""
        self.script.append(
            'apply_patch_check("%s"' % (filename,) +
            "".join([', "%s"' % (i,) for i in sha1]) +
            ') || abort("\\"%s\\" has unexpected contents.");' % (filename,))

    def FileCheck(self, filename, *sha1):
        """Check that the given file (or MTD reference) has one of the
        given *sha1 hashes."""
        self.script.append('assert(sha1_check(read_file("%s")' % (filename,) +
                           "".join([', "%s"' % (i,) for i in sha1]) +
                           '));')

    def CacheFreeSpaceCheck(self, amount):
        """Check that there's at least 'amount' space that can be made
        available on /cache."""
        self.script.append(('apply_patch_space(%d) || abort("Not enough free space '
                            'on /system to apply patches.");') % (amount,))

    def Mount(self, mount_point, mount_options_by_format=""):
        """Mount the partition with the given mount_point.
        mount_options_by_format:
        [fs_type=option[,option]...[|fs_type=option[,option]...]...]
        where option is optname[=optvalue]
        E.g. ext4=barrier=1,nodelalloc,errors=panic|f2fs=errors=recover
        """
        fstab = self.fstab
        if fstab:
            p = fstab[mount_point]
            mount_dict = {}
            if mount_options_by_format is not None:
                for option in mount_options_by_format.split("|"):
                    if "=" in option:
                        key, value = option.split("=", 1)
                        mount_dict[key] = value
            mount_flags = mount_dict.get(p.fs_type, "")
            if p.context is not None:
                mount_flags = p.context + ("," + mount_flags if mount_flags else "")
            self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % (
                p.fs_type, common.PARTITION_TYPES[p.fs_type], p.device,
                p.mount_point, mount_flags))
            self.mounts.add(p.mount_point)

    def Unmount(self, mount_point):
        """Unmount the partition with the given mount_point.

        BUG FIX: the class previously defined Unmount twice; the later,
        unguarded definition shadowed this one and raised KeyError when
        the mount point was not tracked in self.mounts. The definitions
        are now merged into this single guarded version.
        """
        if mount_point in self.mounts:
            self.mounts.remove(mount_point)
            self.script.append('unmount("%s");' % (mount_point,))

    def UnpackPackageDir(self, src, dst):
        """Unpack a given directory from the OTA package into the given
        destination directory."""
        self.script.append('package_extract_dir("%s", "%s");' % (src, dst))

    def Comment(self, comment):
        """Write a comment into the update script."""
        self.script.append("")
        for i in comment.split("\n"):
            self.script.append("# " + i)
        self.script.append("")

    def Print(self, message):
        """Log a message to the screen (if the logs are visible)."""
        self.script.append('ui_print("%s");' % (message,))

    def TunePartition(self, partition, *options):
        """Run tune2fs with *options on an ext2/3/4 partition."""
        fstab = self.fstab
        if fstab:
            p = fstab[partition]
            if p.fs_type not in ("ext2", "ext3", "ext4"):
                raise ValueError("Partition %s cannot be tuned\n" % (partition,))
            self.script.append(
                'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
                '"%s") || abort("Failed to tune partition %s");' % (
                    p.device, partition))

    def FormatPartition(self, partition):
        """Format the given partition, specified by its mount point (eg,
        "/system")."""
        fstab = self.fstab
        if fstab:
            p = fstab[partition]
            self.script.append('format("%s", "%s", "%s", "%s", "%s");' %
                               (p.fs_type, common.PARTITION_TYPES[p.fs_type],
                                p.device, p.length, p.mount_point))

    def WipeBlockDevice(self, partition):
        """Zero out the block device backing /system or /vendor."""
        if partition not in ("/system", "/vendor"):
            raise ValueError(("WipeBlockDevice doesn't work on %s\n") % (partition,))
        fstab = self.fstab
        size = self.info.get(partition.lstrip("/") + "_size", None)
        device = fstab[partition].device
        self.script.append('wipe_block_device("%s", %s);' % (device, size))

    def DeleteFiles(self, file_list):
        """Delete all files in file_list."""
        if not file_list:
            return
        cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
        self.script.append(self.WordWrap(cmd))

    def DeleteFilesIfNotMatching(self, file_list):
        """Delete the file in file_list if not matching the checksum."""
        if not file_list:
            return
        for name, sha1 in file_list:
            cmd = ('sha1_check(read_file("{name}"), "{sha1}") || '
                   'delete("{name}");'.format(name=name, sha1=sha1))
            self.script.append(self.WordWrap(cmd))

    def RenameFile(self, srcfile, tgtfile):
        """Moves a file from one location to another."""
        if self.info.get("update_rename_support", False):
            self.script.append('rename("%s", "%s");' % (srcfile, tgtfile))
        else:
            raise ValueError("Rename not supported by update binary")

    def SkipNextActionIfTargetExists(self, tgtfile, tgtsha1):
        """Prepend an action with an apply_patch_check in order to
        skip the action if the file exists. Used when a patch
        is later renamed."""
        cmd = ('sha1_check(read_file("%s"), %s) ||' % (tgtfile, tgtsha1))
        self.script.append(self.WordWrap(cmd))

    def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
        """Apply binary patches (in *patchpairs) to the given srcfile to
        produce tgtfile (which may be "-" to indicate overwriting the
        source file."""
        if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
            raise ValueError("bad patches given to ApplyPatch")
        cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
               % (srcfile, tgtfile, tgtsha1, tgtsize)]
        for i in range(0, len(patchpairs), 2):
            cmd.append(',\0%s, package_extract_file("%s")' % patchpairs[i:i+2])
        cmd.append(');')
        cmd = "".join(cmd)
        self.script.append(self.WordWrap(cmd))

    def WriteRawImage(self, mount_point, fn, mapfn=None):
        """Write the given package file into the partition for the given
        mount point."""
        fstab = self.fstab
        if fstab:
            p = fstab[mount_point]
            partition_type = common.PARTITION_TYPES[p.fs_type]
            args = {'device': p.device, 'fn': fn}
            if partition_type == "MTD":
                self.script.append(
                    'write_raw_image(package_extract_file("%(fn)s"), "%(device)s");'
                    % args)
            elif partition_type == "EMMC":
                if mapfn:
                    args["map"] = mapfn
                    self.script.append(
                        'package_extract_file("%(fn)s", "%(device)s", "%(map)s");' % args)
                else:
                    self.script.append(
                        'package_extract_file("%(fn)s", "%(device)s");' % args)
            else:
                raise ValueError(
                    "don't know how to write \"%s\" partitions" % p.fs_type)

    def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
        """Set file ownership and permissions."""
        if not self.info.get("use_set_metadata", False):
            self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
        else:
            cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o' \
                % (fn, uid, gid, mode)
            if capabilities is not None:
                cmd += ', "capabilities", %s' % ( capabilities )
            if selabel is not None:
                cmd += ', "selabel", "%s"' % selabel
            cmd += ');'
            self.script.append(cmd)

    def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel,
                                capabilities):
        """Recursively set path ownership and permissions."""
        if not self.info.get("use_set_metadata", False):
            self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
                               % (uid, gid, dmode, fmode, fn))
        else:
            cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
                '"dmode", 0%o, "fmode", 0%o' \
                % (fn, uid, gid, dmode, fmode)
            if capabilities is not None:
                cmd += ', "capabilities", "%s"' % ( capabilities )
            if selabel is not None:
                cmd += ', "selabel", "%s"' % selabel
            cmd += ');'
            self.script.append(cmd)

    def MakeSymlinks(self, symlink_list):
        """Create symlinks, given a list of (dest, link) pairs."""
        by_dest = {}
        for d, l in symlink_list:
            by_dest.setdefault(d, []).append(l)
        # items() instead of the Python-2-only iteritems() so this also
        # runs under Python 3; behavior is identical.
        for dest, links in sorted(by_dest.items()):
            cmd = ('symlink("%s", ' % (dest,) +
                   ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
            self.script.append(self.WordWrap(cmd))

    def AppendExtra(self, extra):
        """Append text verbatim to the output script."""
        self.script.append(extra)

    def UnmountAll(self):
        """Emit unmount commands for every tracked mount point."""
        for p in sorted(self.mounts):
            self.script.append('unmount("%s");' % (p,))
        self.mounts = set()

    def AddToZip(self, input_zip, output_zip, input_path=None):
        """Write the accumulated script to the output_zip file. input_zip
        is used as the source for the 'updater' binary needed to run
        script. If input_path is not None, it will be used as a local
        path for the binary instead of input_zip."""
        self.UnmountAll()
        common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
                           "\n".join(self.script) + "\n")
        if input_path is None:
            data = input_zip.read("OTA/bin/updater")
        else:
            # Context manager so the updater binary's file handle is not leaked.
            with open(input_path, "rb") as f:
                data = f.read()
        common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
                           data, perms=0o755)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
    """Return the attr of *op_def* called *name*, raising TypeError if absent."""
    match = next((a for a in op_def.attr if a.name == name), None)
    if match is None:
        raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
                        (op_def.name, name))
    return match
def _AttrValue(attr_protos, name):
    """Look up *name* in the attr mapping, raising TypeError if missing."""
    if name not in attr_protos:
        raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
                        (name, attr_protos))
    return attr_protos[name]
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
    """Raise TypeError when *dtype* is outside attr_def's allowed_values."""
    if not attr_def.HasField("allowed_values"):
        return
    allowed_list = attr_def.allowed_values.list.type
    if dtype in allowed_list:
        return
    allowed_names = ", ".join(dtypes.as_dtype(x).name for x in allowed_list)
    raise TypeError(
        "Value passed to parameter '%s' has DataType %s not in list of "
        "allowed values: %s" %
        (param_name, dtypes.as_dtype(dtype).name, allowed_names))
def _IsListParameter(arg):
    """True when *arg* is list-valued (has number_attr or type_list_attr)."""
    return bool(arg.number_attr or arg.type_list_attr)
def _NumTypeFields(arg):
    """Count how many of the three mutually-exclusive type fields are set."""
    flags = (arg.type != types_pb2.DT_INVALID,
             bool(arg.type_attr),
             bool(arg.type_list_attr))
    return sum(flags)
def _IsListValue(v):
    """True when *v* is a Python list or tuple."""
    return isinstance(v, (tuple, list))
def _Flatten(l):
    """Flatten one level of nesting: [1, 2, [3, 4], [5]] -> [1, 2, 3, 4, 5]."""
    flat = []
    for item in l:
        if _IsListValue(item):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
def _Restructure(l, structure):
    """Regroup the flat list *l* according to *structure*.

    Each element of *structure* is either `None` (take a single element
    of *l* as-is) or a non-negative integer N (take the next N elements
    of *l* as a nested list).

    When *structure* has exactly one entry, that lone piece is returned
    directly; otherwise the pieces are returned as a tuple. For example:

      _Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
        -> ("foo", ["bar", "baz"], "qux")
      _Restructure(["foo"], [None]) -> "foo"
      _Restructure(["foo"], [1]) -> ["foo"]
      _Restructure([], [0]) -> []

    Args:
      l: A list.
      structure: A list whose elements are either `None` or a
        non-negative integer.

    Returns:
      The elements of `l`, restructured according to `structure` (a
      single piece when `structure` has length 1, else a tuple).
    """
    pieces = []
    pos = 0
    for spec in structure:
        if spec is None:
            pieces.append(l[pos])
            pos += 1
        else:
            pieces.append(l[pos:pos + spec])
            pos += spec
    return pieces[0] if len(pieces) == 1 else tuple(pieces)
def _MakeFloat(v, arg_name):
    """Coerce *v* to float, rejecting non-real types with TypeError."""
    if isinstance(v, compat.real_types):
        return float(v)
    raise TypeError("Expected float for argument '%s' not %s." %
                    (arg_name, repr(v)))
def _MakeInt(v, arg_name):
    """Coerce *v* to int; strings are rejected even when numeric."""
    error = TypeError("Expected int for argument '%s' not %s." %
                      (arg_name, repr(v)))
    if isinstance(v, six.string_types):
        raise error
    try:
        return int(v)
    except (ValueError, TypeError):
        raise error
def _MakeStr(v, arg_name):
    """Coerce *v* to bytes (unicode strings are encoded), else TypeError."""
    if isinstance(v, compat.bytes_or_text_types):
        return compat.as_bytes(v)  # Convert unicode strings to bytes.
    raise TypeError("Expected string for argument '%s' not %s." %
                    (arg_name, repr(v)))
def _MakeBool(v, arg_name):
    """Pass *v* through when it is exactly a bool; raise TypeError otherwise."""
    if isinstance(v, bool):
        return v
    raise TypeError("Expected bool for argument '%s' not %s." %
                    (arg_name, repr(v)))
def _MakeType(v, attr_def):
    """Convert *v* to a DataType enum value satisfying attr_def's constraints."""
    try:
        base = dtypes.as_dtype(v).base_dtype
    except TypeError:
        raise TypeError("Expected DataType for argument '%s' not %s." %
                        (attr_def.name, repr(v)))
    enum_value = base.as_datatype_enum
    _SatisfiesTypeConstraint(enum_value, attr_def, param_name=attr_def.name)
    return enum_value
def _MakeShape(v, arg_name):
    """Convert *v* into a TensorShapeProto.

    Args:
      v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
      arg_name: String, for error messages.

    Returns:
      A TensorShapeProto.
    """
    if isinstance(v, tensor_shape_pb2.TensorShapeProto):
        # Named dimensions are legal but suspicious: warn once and pass through.
        if any(d.name for d in v.dim):
            logging.warning("Warning: TensorShapeProto with a named dimension: %s",
                            str(v))
        return v
    try:
        return tensor_shape.as_shape(v).as_proto()
    except TypeError as e:
        raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
    except ValueError as e:
        raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
    """Ensure *v* is already a TensorProto; no conversion is attempted."""
    if not isinstance(v, tensor_pb2.TensorProto):
        raise TypeError(
            "Don't know how to convert %s to a TensorProto for argument '%s'" %
            (repr(v), arg_name))
    return v
class _OpInfo(object):
    """All per-Op state we would like to precompute/validate."""

    def __init__(self, op_def):
        """Validate *op_def*'s input/output args against its attrs and store it.

        Raises:
          TypeError: if an arg does not have exactly one of its type fields
            set, or an attr referenced by an arg has the wrong attr type.
        """
        self.op_def = op_def
        # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
        # here, instead of these checks.
        for arg in list(op_def.input_arg) + list(op_def.output_arg):
            num_type_fields = _NumTypeFields(arg)
            if num_type_fields != 1:
                raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
                                (arg.name, op_def.name, num_type_fields))
            if arg.type_attr:
                attr_type = _Attr(op_def, arg.type_attr).type
                if attr_type != "type":
                    raise TypeError("Attr '%s' of '%s' used as a type_attr "
                                    "but has type %s" %
                                    (arg.type_attr, op_def.name, attr_type))
            if arg.type_list_attr:
                attr_type = _Attr(op_def, arg.type_list_attr).type
                if attr_type != "list(type)":
                    # BUG FIX: the message previously reported arg.type_attr,
                    # naming the wrong attr in type_list_attr failures.
                    raise TypeError(
                        "Attr '%s' of '%s' used as a type_list_attr but has type %s" %
                        (arg.type_list_attr, op_def.name, attr_type))
            if arg.number_attr:
                attr_type = _Attr(op_def, arg.number_attr).type
                if attr_type != "int":
                    raise TypeError(
                        "Attr '%s' of '%s' used as a number_attr but has type %s" %
                        (arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
    """A context manager for (maybe) colocating with a list of input tensors.

    Args:
      inputs: A list of `Tensor` or `Operation` objects.

    Returns:
      A context manager.
    """
    if inputs:
        # ops.colocate_with() accepts only a single op or tensor, so recurse
        # to stack one colocation scope per element of the list.
        with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
            yield
    else:
        yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
"""Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
def __init__(self):
    # Maps registered op name -> _OpInfo (the validated OpDef plus metadata).
    self._ops = {}
# pylint: disable=invalid-name
def add_op(self, op_def):
    """Register an OpDef. May call apply_op with the name afterwards."""
    if not isinstance(op_def, op_def_pb2.OpDef):
        msg = "%s is %s, not an op_def_pb2.OpDef" % (op_def, type(op_def))
        raise TypeError(msg)
    if not op_def.name:
        raise ValueError("%s missing name." % op_def)
    if op_def.name in self._ops:
        raise RuntimeError("Op name %s registered twice." % op_def.name)
    self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
    """Register the OpDefs from an OpList."""
    if not isinstance(op_list, op_def_pb2.OpList):
        msg = "%s is %s, not an op_def_pb2.OpList" % (op_list, type(op_list))
        raise TypeError(msg)
    for entry in op_list.op:
        self.add_op(entry)
def apply_op(self, op_type_name, name=None, **keywords):
    # pylint: disable=g-doc-args
    """Add a node invoking a registered Op to a graph.

    Example usage:
      # input1 and input2 can be Tensors or anything ops.convert_to_tensor()
      # will convert to a Tensor.
      op_def_library.apply_op("op", input1=input1, input2=input2)
      # Can specify a node name.
      op_def_library.apply_op("op", input1=input1, name="node_name")
      # Must use keyword arguments, with the names specified in the OpDef.
      op_def_library.apply_op("op", input_name=input, attr_name=attr)

    All attrs must either be inferred from an input or specified.
    (If inferred, the attr must not be specified.) If an attr has a default
    value specified in the Op's OpDef, then you may pass None as the value
    of that attr to get the default.

    Args:
      op_type_name: string. Must match the name field of a registered Op.
      name: string. Optional name of the created op.
      **keywords: input Tensor and attr arguments specified by name,
        and optional parameters to pass when constructing the Operation.

    Returns:
      The Tensor(s) representing the output of the operation, or the
      Operation itself if there are no outputs.

    Raises:
      RuntimeError: On some errors.
      TypeError: On some errors.
      ValueError: On some errors.
    """
    output_structure, is_stateful, op = self._apply_op_helper(
        op_type_name, name, **keywords)
    if not output_structure:
        return op
    tensors = ops.convert_n_to_tensor(op.outputs)
    res = _Restructure(tensors, output_structure)
    # A stateful op whose restructured result is an empty list is more
    # useful to callers as the Operation itself.
    if is_stateful and isinstance(res, list) and not res:
        return op
    return res
def _apply_op_helper(self, op_type_name, name=None, **keywords):
"""Implementation of apply_op that returns output_structure, op."""
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.internal_convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError(
"%s that are invalid. Tensors: %s" % (prefix, values))
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ops.internal_convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead." %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.internal_convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any(bt != base_types[0] for bt in base_types):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
assert False, "Unreachable"
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
if isinstance(value, attr_value_pb2.NameAttrList):
attr_value.func.CopyFrom(value)
elif isinstance(value, compat.bytes_or_text_types):
attr_value.func.name = value
else:
value.add_to_graph(ops.get_default_graph())
attr_value.func.name = value.name
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_types = []
output_structure = []
for arg in op_def.output_arg:
types = []
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
if arg.type_attr:
types = [_AttrValue(attr_protos, arg.type_attr).type] * n
else:
types = [arg.type] * n
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
types = [t.type]
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
types = t.list.type
output_structure.append(len(types))
else:
types = [arg.type]
output_structure.append(None)
if arg.is_ref:
types = [dtypes.as_dtype(x)._as_ref for x in types] # pylint: disable=protected-access
output_types.extend(types)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
op = g.create_op(op_type_name, inputs, output_types, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
return output_structure, op_def.is_stateful, op
# pylint: enable=invalid-name
| |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import json
import datetime
from functools import wraps
from cloudify import constants
from cloudify import amqp_client_utils
from cloudify import event as _event
from cloudify.exceptions import ClosedAMQPClientException
# Class used to render events for console output and the verbosity level
# applied to that rendering (consumed by create_event_message_prefix).
EVENT_CLASS = _event.Event
EVENT_VERBOSITY_LEVEL = _event.NO_VERBOSE
def message_context_from_cloudify_context(ctx):
    """Build a message context dict from a CloudifyContext instance."""
    # Fields present for every operation context, regardless of type.
    context = {
        'blueprint_id': ctx.blueprint.id,
        'deployment_id': ctx.deployment.id,
        'execution_id': ctx.execution_id,
        'workflow_id': ctx.workflow_id,
        'task_id': ctx.task_id,
        'task_name': ctx.task_name,
        'task_queue': ctx.task_queue,
        'task_target': ctx.task_target,
        'operation': ctx.operation.name,
        'plugin': ctx.plugin,
    }
    # Add the node identifiers that apply to this context type.
    if ctx.type == constants.NODE_INSTANCE:
        context.update({
            'node_id': ctx.instance.id,
            'node_name': ctx.node.name,
        })
    elif ctx.type == constants.RELATIONSHIP_INSTANCE:
        context.update({
            'source_id': ctx.source.instance.id,
            'source_name': ctx.source.node.name,
            'target_id': ctx.target.instance.id,
            'target_name': ctx.target.node.name,
        })
    return context
def message_context_from_workflow_context(ctx):
    """Build a message context dict from a CloudifyWorkflowContext instance."""
    context = {key: getattr(ctx, key)
               for key in ('execution_id', 'workflow_id')}
    context['blueprint_id'] = ctx.blueprint.id
    context['deployment_id'] = ctx.deployment.id
    return context
def message_context_from_sys_wide_wf_context(ctx):
    """Build a message context for a system-wide workflow.

    System-wide workflows are not bound to a blueprint or deployment,
    so those fields are explicitly None.
    """
    context = dict.fromkeys(('blueprint_id', 'deployment_id'))
    context['execution_id'] = ctx.execution_id
    context['workflow_id'] = ctx.workflow_id
    return context
def message_context_from_workflow_node_instance_context(ctx):
    """Build a message context from a CloudifyWorkflowNode instance."""
    message_context = message_context_from_workflow_context(ctx.ctx)
    # NOTE(review): 'node_name' carries ctx.node_id and 'node_id' carries
    # ctx.id -- looks deliberately crossed; confirm against message consumers
    # before changing.
    message_context['node_name'] = ctx.node_id
    message_context['node_id'] = ctx.id
    return message_context
class CloudifyBaseLoggingHandler(logging.Handler):
    """Base handler that ships formatted log records to RabbitMQ."""

    def __init__(self, ctx, out_func, message_context_builder):
        logging.Handler.__init__(self)
        self.context = message_context_builder(ctx)
        # Fall back to the AMQP output function when none is supplied.
        self.out_func = out_func or amqp_log_out

    def flush(self):
        # Records are pushed out immediately in emit(); nothing buffers here.
        pass

    def emit(self, record):
        text = self.format(record)
        self.out_func({
            'context': self.context,
            'logger': record.name,
            'level': record.levelname.lower(),
            'message': {'text': text},
        })
class CloudifyPluginLoggingHandler(CloudifyBaseLoggingHandler):
    """Handler that ships plugin (operation) log records to RabbitMQ."""
    def __init__(self, ctx, out_func=None):
        super(CloudifyPluginLoggingHandler, self).__init__(
            ctx, out_func, message_context_from_cloudify_context)
class CloudifyWorkflowLoggingHandler(CloudifyBaseLoggingHandler):
    """Handler that ships workflow log records to RabbitMQ."""
    def __init__(self, ctx, out_func=None):
        super(CloudifyWorkflowLoggingHandler, self).__init__(
            ctx, out_func, message_context_from_workflow_context)
class SystemWideWorkflowLoggingHandler(CloudifyBaseLoggingHandler):
    """Handler that ships system-wide workflow log records to RabbitMQ."""
    def __init__(self, ctx, out_func=None):
        super(SystemWideWorkflowLoggingHandler, self).__init__(
            ctx, out_func, message_context_from_sys_wide_wf_context)
class CloudifyWorkflowNodeLoggingHandler(CloudifyBaseLoggingHandler):
    """Handler that ships workflow-node log records to RabbitMQ."""
    def __init__(self, ctx, out_func=None):
        super(CloudifyWorkflowNodeLoggingHandler, self).__init__(
            ctx, out_func,
            message_context_from_workflow_node_instance_context)
def init_cloudify_logger(handler, logger_name,
                         logging_level=logging.DEBUG):
    """
    Instantiate an amqp backed logger based on the provided handler
    for sending log messages to RabbitMQ

    :param handler: A logger handler based on the context
    :param logger_name: The logger name
    :param logging_level: The logging level
    :return: An amqp backed logger
    """

    # TODO: somehow inject logging level (no one currently passes
    # logging_level)
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging_level)

    # Iterate over a *copy* of the handler list: removing entries from
    # logger.handlers while iterating it directly skips every other
    # handler and leaves stale handlers attached.
    for h in list(logger.handlers):
        logger.removeHandler(h)

    handler.setFormatter(logging.Formatter("%(message)s"))
    handler.setLevel(logging_level)
    logger.propagate = True
    logger.addHandler(handler)
    return logger
def send_workflow_event(ctx, event_type,
                        message=None,
                        args=None,
                        additional_context=None,
                        out_func=None):
    """Send a workflow event to RabbitMQ

    :param ctx: A CloudifyWorkflowContext instance
    :param event_type: The event type
    :param message: The message
    :param args: additional arguments that may be added to the message
    :param additional_context: additional context to be added to the context
    """
    _send_event(ctx, 'workflow', event_type, message=message, args=args,
                additional_context=additional_context, out_func=out_func)
def send_sys_wide_wf_event(ctx, event_type, message=None, args=None,
                           additional_context=None, out_func=None):
    """Send a system-wide workflow event to RabbitMQ

    :param ctx: A CloudifySystemWideWorkflowContext instance
    :param event_type: The event type
    :param message: The message
    :param args: additional arguments that may be added to the message
    :param additional_context: additional context to be added to the context
    """
    _send_event(ctx, 'system_wide_workflow', event_type, message=message,
                args=args, additional_context=additional_context,
                out_func=out_func)
def send_workflow_node_event(ctx, event_type,
                             message=None,
                             args=None,
                             additional_context=None,
                             out_func=None):
    """Send a workflow node event to RabbitMQ

    :param ctx: A CloudifyWorkflowNode instance
    :param event_type: The event type
    :param message: The message
    :param args: additional arguments that may be added to the message
    :param additional_context: additional context to be added to the context
    """
    _send_event(ctx, 'workflow_node', event_type, message=message, args=args,
                additional_context=additional_context, out_func=out_func)
def send_plugin_event(ctx,
                      message=None,
                      args=None,
                      additional_context=None,
                      out_func=None):
    """Send a plugin event to RabbitMQ

    :param ctx: A CloudifyContext instance
    :param message: The message
    :param args: additional arguments that may be added to the message
    :param additional_context: additional context to be added to the context
    """
    # Plugin events always use the fixed 'plugin_event' event type.
    _send_event(ctx, 'plugin', 'plugin_event', message=message, args=args,
                additional_context=additional_context, out_func=out_func)
def send_task_event(cloudify_context,
                    event_type,
                    message=None,
                    args=None,
                    additional_context=None,
                    out_func=None):
    """Send a task event to RabbitMQ

    :param cloudify_context: a __cloudify_context struct as passed to
                             operations
    :param event_type: The event type
    :param message: The message
    :param args: additional arguments that may be added to the message
    :param additional_context: additional context to be added to the context
    """
    # import here to avoid cyclic dependencies
    from cloudify.context import CloudifyContext
    ctx = CloudifyContext(cloudify_context)
    _send_event(ctx, 'task', event_type, message=message, args=args,
                additional_context=additional_context, out_func=out_func)
def _send_event(ctx, context_type, event_type,
                message, args, additional_context,
                out_func):
    """Assemble an event dict for ``ctx`` and hand it to ``out_func``."""
    # Map each context type to its message-context builder; 'plugin' and
    # 'task' both carry a CloudifyContext.
    builders = {
        'plugin': message_context_from_cloudify_context,
        'task': message_context_from_cloudify_context,
        'workflow': message_context_from_workflow_context,
        'workflow_node': message_context_from_workflow_node_instance_context,
        'system_wide_workflow': message_context_from_sys_wide_wf_context,
    }
    if context_type not in builders:
        raise RuntimeError('Invalid context_type: {0}'.format(context_type))
    message_context = builders[context_type](ctx)
    message_context.update(additional_context or {})

    event = {
        'event_type': event_type,
        'context': message_context,
        'message': {
            'text': message,
            'arguments': args
        }
    }
    # Default to AMQP output when no explicit sink was provided.
    (out_func or amqp_event_out)(event)
def populate_base_item(item, message_type):
    """Stamp ``item`` with the fields every outgoing message carries.

    :param item: event/log dict, mutated in place
    :param message_type: value stored under the 'type' key
    """
    # Use strftime with an explicit %f so the fractional seconds are always
    # present: isoformat() omits them entirely when microsecond == 0, in
    # which case the old `isoformat()[:-3]` slice chopped the seconds off
    # the timestamp. Truncate %f (6 digits) to milliseconds and append 'Z'
    # to match ISO format.
    now = datetime.datetime.utcnow()
    item['timestamp'] = '{0}Z'.format(
        now.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3])
    item['message_code'] = None
    item['type'] = message_type
def amqp_event_out(event):
    """Publish ``event`` to RabbitMQ as a cloudify_event message."""
    populate_base_item(event, 'cloudify_event')
    events_logger = logging.getLogger('cloudify_events')
    _publish_message(event, 'event', events_logger)
def amqp_log_out(log):
    """Publish ``log`` to RabbitMQ as a cloudify_log message."""
    populate_base_item(log, 'cloudify_log')
    logs_logger = logging.getLogger('cloudify_logs')
    _publish_message(log, 'log', logs_logger)
def stdout_event_out(event):
    """Write ``event`` to stdout instead of publishing it over AMQP."""
    populate_base_item(event, 'cloudify_event')
    line = create_event_message_prefix(event)
    # Events that render to nothing (per verbosity settings) are skipped.
    if line:
        sys.stdout.write('{0}\n'.format(line))
        sys.stdout.flush()
def stdout_log_out(log):
    """Write ``log`` to stdout instead of publishing it over AMQP."""
    populate_base_item(log, 'cloudify_log')
    line = create_event_message_prefix(log)
    # Logs that render to nothing (per verbosity settings) are skipped.
    if line:
        sys.stdout.write('{0}\n'.format(line))
        sys.stdout.flush()
def create_event_message_prefix(event):
    """Render ``event`` for console output, or None if it has no output."""
    event_obj = EVENT_CLASS(event, verbosity_level=EVENT_VERBOSITY_LEVEL)
    return str(event_obj) if event_obj.has_output else None
def with_amqp_client(func):
    """Decorator that supplies an AMQP client as the first positional
    argument of ``func``, releasing the client when the call returns."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Calls the wrapped func with an AMQP client instance."""
        # call the wrapped func with the amqp client
        with amqp_client_utils.get_event_amqp_client() as client:
            return func(client, *args, **kwargs)
    return wrapper
@with_amqp_client
def _publish_message(client, message, message_type, logger):
    """Best-effort publish of ``message``; failures are logged, not raised."""
    try:
        client.publish_message(message, message_type)
    except ClosedAMQPClientException:
        # A closed client indicates a lifecycle bug upstream - propagate.
        raise
    except BaseException as e:
        # Deliberately broad: a publishing failure must never take down
        # the calling operation. Log and continue.
        error_desc = '{0}: {1}'.format(type(e).__name__, e)
        logger.warning(
            'Error publishing {0} to RabbitMQ ({1})[message={2}]'
            .format(message_type, error_desc, json.dumps(message)))
class ZMQLoggingHandler(logging.Handler):
    """Logging handler that sends formatted records as JSON over ZeroMQ."""

    def __init__(self, context, socket, fallback_logger):
        # context: message context dict attached to every record.
        # socket: a connected ZeroMQ socket used for sending.
        # fallback_logger: logger used to report send failures.
        logging.Handler.__init__(self)
        self._context = context
        self._socket = socket
        self._fallback_logger = fallback_logger

    def emit(self, record):
        message = self.format(record)
        # NOTE(review): str.decode exists only on Python 2, where this
        # round-trip drops undecodable bytes; on Python 3 this line would
        # raise AttributeError -- confirm the intended interpreter.
        message = message.decode('utf-8', 'ignore').encode('utf-8')
        try:
            # Not using send_json to avoid possible deadlocks (see CFY-4866)
            self._socket.send(json.dumps({
                'context': self._context,
                'message': message
            }))
        except Exception as e:
            # Logging must never crash the caller; report via the fallback.
            self._fallback_logger.warn(
                'Error sending message to logging server. ({0}: {1})'
                '[context={2}, message={3}]'
                .format(type(e).__name__, e, self._context, message))
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forcing functions for Navier-Stokes equations."""
# TODO(jamieas): change the signature for all forcing functions so that they
# close over `grid`.
import functools
from typing import Callable, Optional, Tuple
import jax.numpy as jnp
from jax_cfd.base import equations
from jax_cfd.base import filter_utils
from jax_cfd.base import grids
from jax_cfd.base import validation_problems
# Convenience aliases for the grids-based types used throughout this module.
Array = grids.Array
GridArrayVector = grids.GridArrayVector
GridVariableVector = grids.GridVariableVector
# A forcing function maps the velocity field (a GridVariableVector) to the
# force applied to each velocity component (a GridArrayVector).
ForcingFn = Callable[[GridVariableVector], GridArrayVector]
def taylor_green_forcing(
    grid: grids.Grid, scale: float = 1, k: int = 2,
) -> ForcingFn:
  """Returns a constant forcing shaped like Taylor-Green vortices."""
  u, v = validation_problems.TaylorGreen(
      shape=grid.shape[:2], kx=k, ky=k).velocity()
  if grid.ndim == 2:
    # Scale each component in place, keeping its offset on the grid.
    f = (grids.GridArray(u.data * scale, u.offset, grid),
         grids.GridArray(v.data * scale, v.offset, grid))
  elif grid.ndim == 3:
    # Broadcast the 2D pattern along z and add a zero w component.
    u_data = jnp.broadcast_to(jnp.expand_dims(u.data * scale, -1), grid.shape)
    v_data = jnp.broadcast_to(jnp.expand_dims(v.data * scale, -1), grid.shape)
    u3 = grids.GridArray(u_data, (1, 0.5, 0.5), grid)
    v3 = grids.GridArray(v_data, (0.5, 1, 0.5), grid)
    w3 = grids.GridArray(jnp.zeros_like(u_data), (0.5, 0.5, 1), grid)
    f = (u3, v3, w3)
  else:
    raise NotImplementedError

  def forcing(v):
    del v  # the force is constant in the velocity
    return f
  return forcing
def kolmogorov_forcing(
    grid: grids.Grid,
    scale: float = 1,
    k: int = 2,
    swap_xy: bool = False,
    offsets: Optional[Tuple[Tuple[float]]] = None,
) -> ForcingFn:
  """Returns the Kolmogorov forcing function for turbulence in 2D."""
  if offsets is None:
    offsets = grid.cell_faces

  if swap_xy:
    # Sinusoidal drive on the v component, varying along x.
    x = grid.mesh(offsets[1])[0]
    v = scale * grids.GridArray(jnp.sin(k * x), offsets[1], grid)
    if grid.ndim == 2:
      f = (grids.GridArray(jnp.zeros_like(v.data), (1, 1/2), grid), v)
    elif grid.ndim == 3:
      u = grids.GridArray(jnp.zeros_like(v.data), (1, 1/2, 1/2), grid)
      w = grids.GridArray(jnp.zeros_like(u.data), (1/2, 1/2, 1), grid)
      f = (u, v, w)
    else:
      raise NotImplementedError
  else:
    # Sinusoidal drive on the u component, varying along y.
    y = grid.mesh(offsets[0])[1]
    u = scale * grids.GridArray(jnp.sin(k * y), offsets[0], grid)
    if grid.ndim == 2:
      f = (u, grids.GridArray(jnp.zeros_like(u.data), (1/2, 1), grid))
    elif grid.ndim == 3:
      v = grids.GridArray(jnp.zeros_like(u.data), (1/2, 1, 1/2), grid)
      w = grids.GridArray(jnp.zeros_like(u.data), (1/2, 1/2, 1), grid)
      f = (u, v, w)
    else:
      raise NotImplementedError

  def forcing(v):
    del v  # the force is constant in the velocity
    return f
  return forcing
def linear_forcing(grid, coefficient: float) -> ForcingFn:
"""Linear forcing, proportional to velocity."""
del grid
def forcing(v):
return tuple(coefficient * u.array for u in v)
return forcing
def no_forcing(grid):
  """Zero-valued forcing field for unforced simulations."""
  del grid  # unused; kept for a uniform forcing-constructor signature

  def forcing(v):
    # Multiplying by zero preserves each component's shape and dtype.
    return tuple(0 * component.array for component in v)
  return forcing
def sum_forcings(*forcings: ForcingFn) -> ForcingFn:
  """Combines several forcing functions into one that sums their outputs."""
  def forcing(v):
    contributions = [f(v) for f in forcings]
    return equations.sum_fields(*contributions)
  return forcing
# Registry of named constant-forcing constructors; looked up by
# `simple_turbulence_forcing` via its `forcing_type` argument.
FORCING_FUNCTIONS = dict(kolmogorov=kolmogorov_forcing,
                         taylor_green=taylor_green_forcing)
def simple_turbulence_forcing(
    grid: grids.Grid,
    constant_magnitude: float = 0,
    constant_wavenumber: int = 2,
    linear_coefficient: float = 0,
    forcing_type: str = 'kolmogorov',
) -> ForcingFn:
  """Returns a forcing function for turbulence in 2D or 3D.

  2D turbulence needs a driving force injecting energy at intermediate
  length-scales, and a damping force at long length-scales to avoid all energy
  accumulating in giant vorticies. This can be achieved with
  `constant_magnitude > 0` and `linear_coefficient < 0`.

  3D turbulence only needs a driving force at the longest length-scale (damping
  happens at the smallest length-scales due to viscosity and/or numerical
  dispersion). This can be achieved with `constant_magnitude = 0` and
  `linear_coefficient > 0`.

  Args:
    grid: grid on which to simulate.
    constant_magnitude: magnitude for constant forcing with Taylor-Green
      vortices.
    constant_wavenumber: wavenumber for constant forcing with Taylor-Green
      vortices.
    linear_coefficient: forcing coefficient proportional to velocity, for
      either driving or damping based on the sign.
    forcing_type: String that specifies forcing. This must specify the name of
      function declared in FORCING_FUNCTIONS (taylor_green, etc.)

  Returns:
    Forcing function.
  """
  # Validate the forcing name up front so the error is raised eagerly.
  if forcing_type not in FORCING_FUNCTIONS:
    raise ValueError('Unknown `forcing_type`. '
                     f'Expected one of {list(FORCING_FUNCTIONS.keys())}; '
                     f'got {forcing_type}.')
  constant_force = FORCING_FUNCTIONS[forcing_type](
      grid, constant_magnitude, constant_wavenumber)
  linear_force = linear_forcing(grid, linear_coefficient)
  return sum_forcings(linear_force, constant_force)
def filtered_forcing(
    spectral_density: Callable[[Array], Array],
    grid: grids.Grid,
) -> ForcingFn:
  """Apply forcing as a function of angular frequency.

  Args:
    spectral_density: if `x_hat` is a Fourier component of the velocity with
      angular frequency `k` then the forcing applied to `x_hat` is
      `spectral_density(k)`.
    grid: object representing spatial discretization.

  Returns:
    A forcing function that applies filtered forcing.
  """
  # Build the filter once at construction time instead of on every call:
  # it depends only on `spectral_density` and `grid`, not on the velocity.
  filter_ = grids.applied(
      functools.partial(filter_utils.filter, spectral_density, grid=grid))

  def forcing(v):
    return tuple(filter_(u.array) for u in v)
  return forcing
def filtered_linear_forcing(
    lower_wavenumber: float,
    upper_wavenumber: float,
    coefficient: float,
    grid: grids.Grid,
) -> ForcingFn:
  """Apply linear forcing to low frequency components of the velocity field.

  Args:
    lower_wavenumber: the minimum wavenumber to which forcing should be
      applied.
    upper_wavenumber: the maximum wavenumber to which forcing should be
      applied.
    coefficient: the linear coefficient for forcing applied to components with
      wavenumber below `threshold`.
    grid: object representing spatial discretization.

  Returns:
    A forcing function that applies filtered linear forcing.
  """
  # Angular-frequency band edges (wavenumbers scaled by 2*pi).
  low = 2 * jnp.pi * lower_wavenumber
  high = 2 * jnp.pi * upper_wavenumber

  def spectral_density(k):
    in_band = (k >= low) & (k <= high)
    return jnp.where(in_band, coefficient, 0)

  return filtered_forcing(spectral_density, grid)
| |
import pytest
from django.conf.urls import include, url
from django.test import override_settings
from rest_framework import serializers, status, versioning
from rest_framework.decorators import APIView
from rest_framework.relations import PKOnlyObject
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.test import APIRequestFactory, APITestCase
from rest_framework.versioning import NamespaceVersioning
@override_settings(ROOT_URLCONF='tests.test_versioning')
class URLPatternsTestCase(APITestCase):
    """
    Isolates URL patterns used during testing on the test class itself.
    For example:
    class MyTestCase(URLPatternsTestCase):
        urlpatterns = [
            ...
        ]
        def test_something(self):
            ...
    """
    def setUp(self):
        # Install this class's urlpatterns as the module-level `urlpatterns`,
        # which the overridden ROOT_URLCONF (this module) resolves against.
        global urlpatterns
        urlpatterns = self.urlpatterns

    def tearDown(self):
        # Reset so patterns do not leak into other test classes.
        global urlpatterns
        urlpatterns = []
class RequestVersionView(APIView):
    """Echoes back the version determined for the incoming request."""
    def get(self, request, *args, **kwargs):
        return Response({'version': request.version})
class ReverseView(APIView):
    """Echoes back the reversed URL for the 'another' route."""
    def get(self, request, *args, **kwargs):
        return Response({'url': reverse('another', request=request)})
class AllowedVersionsView(RequestVersionView):
    """Restricts the versioning scheme to allowed versions v1/v2."""
    def determine_version(self, request, *args, **kwargs):
        # Configure the scheme instance directly instead of subclassing it.
        scheme = self.versioning_class()
        scheme.allowed_versions = ('v1', 'v2')
        return (scheme.determine_version(request, *args, **kwargs), scheme)
class AllowedAndDefaultVersionsView(RequestVersionView):
    """Allowed versions v1/v2 with v2 as the default when none is given."""
    def determine_version(self, request, *args, **kwargs):
        scheme = self.versioning_class()
        scheme.allowed_versions = ('v1', 'v2')
        scheme.default_version = 'v2'
        return (scheme.determine_version(request, *args, **kwargs), scheme)
class AllowedWithNoneVersionsView(RequestVersionView):
    """Allowed versions v1/v2, also permitting unversioned (None) requests."""
    def determine_version(self, request, *args, **kwargs):
        scheme = self.versioning_class()
        scheme.allowed_versions = ('v1', 'v2', None)
        return (scheme.determine_version(request, *args, **kwargs), scheme)
class AllowedWithNoneAndDefaultVersionsView(RequestVersionView):
    """Allowed versions v1/v2/None with v2 as the default."""
    def determine_version(self, request, *args, **kwargs):
        scheme = self.versioning_class()
        scheme.allowed_versions = ('v1', 'v2', None)
        scheme.default_version = 'v2'
        return (scheme.determine_version(request, *args, **kwargs), scheme)
# Shared request factory for all test classes in this module.
factory = APIRequestFactory()


def dummy_view(request):
    """Placeholder view used only as a URL/reverse() target."""
    pass


def dummy_pk_view(request, pk):
    """Placeholder detail view used only as a URL/reverse() target."""
    pass
class TestRequestVersion:
    """Tests how each versioning scheme populates `request.version`."""

    def test_unversioned(self):
        """Without a versioning class, request.version is None."""
        view = RequestVersionView.as_view()

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'version': None}

    def test_query_param_versioning(self):
        """The version is read from the `version` query parameter."""
        scheme = versioning.QueryParameterVersioning
        view = RequestVersionView.as_view(versioning_class=scheme)

        request = factory.get('/endpoint/?version=1.2.3')
        response = view(request)
        assert response.data == {'version': '1.2.3'}

        # No query parameter -> no version.
        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'version': None}

    @override_settings(ALLOWED_HOSTS=['*'])
    def test_host_name_versioning(self):
        """The version is read from the first hostname component."""
        scheme = versioning.HostNameVersioning
        view = RequestVersionView.as_view(versioning_class=scheme)

        request = factory.get('/endpoint/', HTTP_HOST='v1.example.org')
        response = view(request)
        assert response.data == {'version': 'v1'}

        # Default test host ('testserver') carries no version component.
        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'version': None}

    def test_accept_header_versioning(self):
        """The version is read from the media type's `version` parameter."""
        scheme = versioning.AcceptHeaderVersioning
        view = RequestVersionView.as_view(versioning_class=scheme)

        request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=1.2.3')
        response = view(request)
        assert response.data == {'version': '1.2.3'}

        # A wildcard media type still carries the version parameter.
        request = factory.get('/endpoint/', HTTP_ACCEPT='*/*; version=1.2.3')
        response = view(request)
        assert response.data == {'version': '1.2.3'}

        # No version parameter -> no version.
        request = factory.get('/endpoint/', HTTP_ACCEPT='application/json')
        response = view(request)
        assert response.data == {'version': None}

    def test_url_path_versioning(self):
        """The version is read from the `version` URL kwarg."""
        scheme = versioning.URLPathVersioning
        view = RequestVersionView.as_view(versioning_class=scheme)

        request = factory.get('/1.2.3/endpoint/')
        response = view(request, version='1.2.3')
        assert response.data == {'version': '1.2.3'}

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'version': None}

    def test_namespace_versioning(self):
        """The version is read from the resolved URL namespace."""
        class FakeResolverMatch:
            namespace = 'v1'

        scheme = versioning.NamespaceVersioning
        view = RequestVersionView.as_view(versioning_class=scheme)

        request = factory.get('/v1/endpoint/')
        # Simulate URL resolution having matched the 'v1' namespace.
        request.resolver_match = FakeResolverMatch
        response = view(request, version='v1')
        assert response.data == {'version': 'v1'}

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'version': None}
class TestURLReversing(URLPatternsTestCase):
    """Tests `reverse()` URL generation under each versioning scheme."""

    included = [
        url(r'^namespaced/$', dummy_view, name='another'),
        url(r'^example/(?P<pk>\d+)/$', dummy_pk_view, name='example-detail')
    ]

    urlpatterns = [
        url(r'^v1/', include((included, 'v1'), namespace='v1')),
        url(r'^another/$', dummy_view, name='another'),
        # Fixed: the previous pattern `[v1|v2]+` was a character class
        # matching any run of the characters 'v', '1', '|', '2'
        # (e.g. 'vv21|'), not the literal strings 'v1'/'v2'. An
        # alternation group expresses the actual intent.
        url(r'^(?P<version>v1|v2)/another/$', dummy_view, name='another'),
    ]

    def test_reverse_unversioned(self):
        """Without versioning, reverse() returns the plain route."""
        view = ReverseView.as_view()

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'url': 'http://testserver/another/'}

    def test_reverse_query_param_versioning(self):
        """The version query parameter is propagated to reversed URLs."""
        scheme = versioning.QueryParameterVersioning
        view = ReverseView.as_view(versioning_class=scheme)

        request = factory.get('/endpoint/?version=v1')
        response = view(request)
        assert response.data == {'url': 'http://testserver/another/?version=v1'}

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'url': 'http://testserver/another/'}

    @override_settings(ALLOWED_HOSTS=['*'])
    def test_reverse_host_name_versioning(self):
        """The versioned hostname is preserved in reversed URLs."""
        scheme = versioning.HostNameVersioning
        view = ReverseView.as_view(versioning_class=scheme)

        request = factory.get('/endpoint/', HTTP_HOST='v1.example.org')
        response = view(request)
        assert response.data == {'url': 'http://v1.example.org/another/'}

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'url': 'http://testserver/another/'}

    def test_reverse_url_path_versioning(self):
        """The version path component is filled into reversed URLs."""
        scheme = versioning.URLPathVersioning
        view = ReverseView.as_view(versioning_class=scheme)

        request = factory.get('/v1/endpoint/')
        response = view(request, version='v1')
        assert response.data == {'url': 'http://testserver/v1/another/'}

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'url': 'http://testserver/another/'}

    def test_reverse_namespace_versioning(self):
        """Reversing resolves names inside the versioned namespace."""
        class FakeResolverMatch:
            namespace = 'v1'

        scheme = versioning.NamespaceVersioning
        view = ReverseView.as_view(versioning_class=scheme)

        request = factory.get('/v1/endpoint/')
        request.resolver_match = FakeResolverMatch
        response = view(request, version='v1')
        assert response.data == {'url': 'http://testserver/v1/namespaced/'}

        request = factory.get('/endpoint/')
        response = view(request)
        assert response.data == {'url': 'http://testserver/another/'}
class TestInvalidVersion:
    """Requests carrying a version outside ``allowed_versions`` must be rejected."""

    def test_invalid_query_param_versioning(self):
        # ?version=v3 is not in the allowed set -> 404.
        view = AllowedVersionsView.as_view(
            versioning_class=versioning.QueryParameterVersioning)
        resp = view(factory.get('/endpoint/?version=v3'))
        assert resp.status_code == status.HTTP_404_NOT_FOUND

    @override_settings(ALLOWED_HOSTS=['*'])
    def test_invalid_host_name_versioning(self):
        # Unknown version in the hostname -> 404.
        view = AllowedVersionsView.as_view(
            versioning_class=versioning.HostNameVersioning)
        resp = view(factory.get('/endpoint/', HTTP_HOST='v3.example.org'))
        assert resp.status_code == status.HTTP_404_NOT_FOUND

    def test_invalid_accept_header_versioning(self):
        # A bad media-type version parameter yields 406, not 404.
        view = AllowedVersionsView.as_view(
            versioning_class=versioning.AcceptHeaderVersioning)
        resp = view(factory.get('/endpoint/',
                                HTTP_ACCEPT='application/json; version=v3'))
        assert resp.status_code == status.HTTP_406_NOT_ACCEPTABLE

    def test_invalid_url_path_versioning(self):
        # Unknown version captured from the URL path -> 404.
        view = AllowedVersionsView.as_view(
            versioning_class=versioning.URLPathVersioning)
        resp = view(factory.get('/v3/endpoint/'), version='v3')
        assert resp.status_code == status.HTTP_404_NOT_FOUND

    def test_invalid_namespace_versioning(self):
        # Unknown namespace resolved for the request -> 404.
        class FakeResolverMatch:
            namespace = 'v3'

        view = AllowedVersionsView.as_view(
            versioning_class=versioning.NamespaceVersioning)
        request = factory.get('/v3/endpoint/')
        request.resolver_match = FakeResolverMatch
        resp = view(request, version='v3')
        assert resp.status_code == status.HTTP_404_NOT_FOUND
class TestAllowedAndDefaultVersion:
    """How a missing version interacts with allowed/default version settings."""

    def test_missing_without_default(self):
        # No version in the Accept header and no default configured -> 406.
        view = AllowedVersionsView.as_view(
            versioning_class=versioning.AcceptHeaderVersioning)
        resp = view(factory.get('/endpoint/', HTTP_ACCEPT='application/json'))
        assert resp.status_code == status.HTTP_406_NOT_ACCEPTABLE

    def test_missing_with_default(self):
        # Missing version falls back to the configured default 'v2'.
        view = AllowedAndDefaultVersionsView.as_view(
            versioning_class=versioning.AcceptHeaderVersioning)
        resp = view(factory.get('/endpoint/', HTTP_ACCEPT='application/json'))
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data == {'version': 'v2'}

    def test_with_default(self):
        # Explicitly requesting the default version is accepted as usual.
        view = AllowedAndDefaultVersionsView.as_view(
            versioning_class=versioning.AcceptHeaderVersioning)
        resp = view(factory.get('/endpoint/',
                                HTTP_ACCEPT='application/json; version=v2'))
        assert resp.status_code == status.HTTP_200_OK

    def test_missing_without_default_but_none_allowed(self):
        # ``None`` in allowed_versions permits version-less requests.
        view = AllowedWithNoneVersionsView.as_view(
            versioning_class=versioning.AcceptHeaderVersioning)
        resp = view(factory.get('/endpoint/', HTTP_ACCEPT='application/json'))
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data == {'version': None}

    def test_missing_with_default_and_none_allowed(self):
        # With both None allowed and a default set, the default wins.
        view = AllowedWithNoneAndDefaultVersionsView.as_view(
            versioning_class=versioning.AcceptHeaderVersioning)
        resp = view(factory.get('/endpoint/', HTTP_ACCEPT='application/json'))
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data == {'version': 'v2'}
class TestHyperlinkedRelatedField(URLPatternsTestCase):
    """HyperlinkedRelatedField must only accept URLs from the request's version namespace."""
    included = [
        url(r'^namespaced/(?P<pk>\d+)/$', dummy_pk_view, name='namespaced'),
    ]

    urlpatterns = [
        url(r'^v1/', include((included, 'v1'), namespace='v1')),
        url(r'^v2/', include((included, 'v2'), namespace='v2'))
    ]

    def setUp(self):
        super(TestHyperlinkedRelatedField, self).setUp()

        class MockQueryset(object):
            # Stand-in for a real queryset; returns a marker string per pk.
            def get(self, pk):
                return 'object %s' % pk

        self.field = serializers.HyperlinkedRelatedField(
            view_name='namespaced',
            queryset=MockQueryset()
        )
        # Attach a request pinned to version 'v1' so lookups resolve in that namespace.
        request = factory.get('/')
        request.versioning_scheme = NamespaceVersioning()
        request.version = 'v1'
        self.field._context = {'request': request}

    def test_bug_2489(self):
        # Regression test: a URL belonging to another version's namespace
        # must fail validation even though the pattern shape matches.
        assert self.field.to_internal_value('/v1/namespaced/3/') == 'object 3'
        with pytest.raises(serializers.ValidationError):
            self.field.to_internal_value('/v2/namespaced/3/')
class TestNamespaceVersioningHyperlinkedRelatedFieldScheme(URLPatternsTestCase):
    """URL reversal inside HyperlinkedRelatedField under NamespaceVersioning."""
    nested = [
        url(r'^namespaced/(?P<pk>\d+)/$', dummy_pk_view, name='nested'),
    ]
    included = [
        url(r'^namespaced/(?P<pk>\d+)/$', dummy_pk_view, name='namespaced'),
        url(r'^nested/', include((nested, 'nested-namespace'), namespace='nested-namespace'))
    ]

    urlpatterns = [
        url(r'^v1/', include((included, 'restframeworkv1'), namespace='v1')),
        url(r'^v2/', include((included, 'restframeworkv2'), namespace='v2')),
        url(r'^non-api/(?P<pk>\d+)/$', dummy_pk_view, name='non-api-view')
    ]

    def _create_field(self, view_name, version):
        # Build a read-only hyperlinked field whose request carries `version`.
        request = factory.get("/")
        request.versioning_scheme = NamespaceVersioning()
        request.version = version

        field = serializers.HyperlinkedRelatedField(
            view_name=view_name,
            read_only=True)
        field._context = {'request': request}
        return field

    def test_api_url_is_properly_reversed_with_v1(self):
        field = self._create_field('namespaced', 'v1')
        assert field.to_representation(PKOnlyObject(3)) == 'http://testserver/v1/namespaced/3/'

    def test_api_url_is_properly_reversed_with_v2(self):
        field = self._create_field('namespaced', 'v2')
        assert field.to_representation(PKOnlyObject(5)) == 'http://testserver/v2/namespaced/5/'

    def test_api_url_is_properly_reversed_with_nested(self):
        # The version string may itself be a dotted namespace path.
        field = self._create_field('nested', 'v1:nested-namespace')
        assert field.to_representation(PKOnlyObject(3)) == 'http://testserver/v1/nested/namespaced/3/'

    def test_non_api_url_is_properly_reversed_regardless_of_the_version(self):
        """
        Regression test for #2711
        """
        field = self._create_field('non-api-view', 'v1')
        assert field.to_representation(PKOnlyObject(10)) == 'http://testserver/non-api/10/'
        field = self._create_field('non-api-view', 'v2')
        assert field.to_representation(PKOnlyObject(10)) == 'http://testserver/non-api/10/'
| |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent import manager
from fuel_agent.objects import partition
from fuel_agent.tests import test_nailgun
from fuel_agent.utils import artifact_utils as au
from fuel_agent.utils import fs_utils as fu
from fuel_agent.utils import hardware_utils as hu
from fuel_agent.utils import lvm_utils as lu
from fuel_agent.utils import md_utils as mu
from fuel_agent.utils import partition_utils as pu
from fuel_agent.utils import utils
CONF = cfg.CONF
class TestManager(test_base.BaseTestCase):
    """Tests for fuel_agent.manager.Manager driven by the sample nailgun
    provisioning data; all disk/fs/LVM/MD utility calls are mocked out."""

    def setUp(self):
        super(TestManager, self).setUp()
        self.mgr = manager.Manager(test_nailgun.PROVISION_SAMPLE_DATA)

    @mock.patch.object(hu, 'list_block_devices')
    def test_do_parsing(self, mock_lbd):
        """do_parsing() must populate all three schemes from the provision data."""
        mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        self.mgr.do_parsing()
        #NOTE(agordeev): there's no need for deeper assertions as all schemes
        # thoroughly tested in test_nailgun
        self.assertFalse(self.mgr.partition_scheme is None)
        self.assertFalse(self.mgr.configdrive_scheme is None)
        self.assertFalse(self.mgr.image_scheme is None)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(mu, 'mdclean_all')
    @mock.patch.object(lu, 'lvremove_all')
    @mock.patch.object(lu, 'vgremove_all')
    @mock.patch.object(lu, 'pvremove_all')
    @mock.patch.object(fu, 'make_fs')
    @mock.patch.object(lu, 'lvcreate')
    @mock.patch.object(lu, 'vgcreate')
    @mock.patch.object(lu, 'pvcreate')
    @mock.patch.object(mu, 'mdcreate')
    @mock.patch.object(pu, 'set_gpt_type')
    @mock.patch.object(pu, 'set_partition_flag')
    @mock.patch.object(pu, 'make_partition')
    @mock.patch.object(pu, 'make_label')
    @mock.patch.object(hu, 'list_block_devices')
    def test_do_partitioning(self, mock_hu_lbd, mock_pu_ml, mock_pu_mp,
                             mock_pu_spf, mock_pu_sgt, mock_mu_m, mock_lu_p,
                             mock_lu_v, mock_lu_l, mock_fu_mf, mock_pvr,
                             mock_vgr, mock_lvr, mock_mdr, mock_exec):
        """do_partitioning() must issue the expected label/partition/RAID/LVM/
        mkfs calls, in order, for the sample partition scheme."""
        mock_hu_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        self.mgr.do_parsing()
        self.mgr.do_partitioning()
        # GPT labels are written on all three disks.
        mock_pu_ml_expected_calls = [mock.call('/dev/sda', 'gpt'),
                                     mock.call('/dev/sdb', 'gpt'),
                                     mock.call('/dev/sdc', 'gpt')]
        self.assertEqual(mock_pu_ml_expected_calls, mock_pu_ml.call_args_list)
        # Partition layout: (device, begin, end, type) per sample scheme.
        mock_pu_mp_expected_calls = [
            mock.call('/dev/sda', 1, 25, 'primary'),
            mock.call('/dev/sda', 25, 225, 'primary'),
            mock.call('/dev/sda', 225, 425, 'primary'),
            mock.call('/dev/sda', 425, 625, 'primary'),
            mock.call('/dev/sda', 625, 20063, 'primary'),
            mock.call('/dev/sda', 20063, 65660, 'primary'),
            mock.call('/dev/sda', 65660, 65680, 'primary'),
            mock.call('/dev/sdb', 1, 25, 'primary'),
            mock.call('/dev/sdb', 25, 225, 'primary'),
            mock.call('/dev/sdb', 225, 425, 'primary'),
            mock.call('/dev/sdb', 425, 65396, 'primary'),
            mock.call('/dev/sdc', 1, 25, 'primary'),
            mock.call('/dev/sdc', 25, 225, 'primary'),
            mock.call('/dev/sdc', 225, 425, 'primary'),
            mock.call('/dev/sdc', 425, 65396, 'primary')]
        self.assertEqual(mock_pu_mp_expected_calls, mock_pu_mp.call_args_list)
        # First partition on each disk is flagged for GRUB.
        mock_pu_spf_expected_calls = [mock.call('/dev/sda', 1, 'bios_grub'),
                                      mock.call('/dev/sdb', 1, 'bios_grub'),
                                      mock.call('/dev/sdc', 1, 'bios_grub')]
        self.assertEqual(mock_pu_spf_expected_calls,
                         mock_pu_spf.call_args_list)
        mock_pu_sgt_expected_calls = [mock.call('/dev/sda', 4, 'fake_guid')]
        self.assertEqual(mock_pu_sgt_expected_calls,
                         mock_pu_sgt.call_args_list)
        # A single RAID1 array over the third partitions.
        mock_mu_m_expected_calls = [mock.call('/dev/md0', 'mirror',
                                              '/dev/sda3', '/dev/sdb3',
                                              '/dev/sdc3')]
        self.assertEqual(mock_mu_m_expected_calls, mock_mu_m.call_args_list)
        # LVM physical volumes, volume groups, logical volumes.
        mock_lu_p_expected_calls = [
            mock.call('/dev/sda5', metadatasize=28, metadatacopies=2),
            mock.call('/dev/sda6', metadatasize=28, metadatacopies=2),
            mock.call('/dev/sdb4', metadatasize=28, metadatacopies=2),
            mock.call('/dev/sdc4', metadatasize=28, metadatacopies=2)]
        self.assertEqual(mock_lu_p_expected_calls, mock_lu_p.call_args_list)
        mock_lu_v_expected_calls = [mock.call('os', '/dev/sda5'),
                                    mock.call('image', '/dev/sda6',
                                              '/dev/sdb4', '/dev/sdc4')]
        self.assertEqual(mock_lu_v_expected_calls, mock_lu_v.call_args_list)
        mock_lu_l_expected_calls = [mock.call('os', 'root', 15360),
                                    mock.call('os', 'swap', 4014),
                                    mock.call('image', 'glance', 175347)]
        self.assertEqual(mock_lu_l_expected_calls, mock_lu_l.call_args_list)
        # Filesystems created on the resulting devices.
        mock_fu_mf_expected_calls = [
            mock.call('ext2', '', '', '/dev/md0'),
            mock.call('ext2', '', '', '/dev/sda4'),
            mock.call('ext4', '', '', '/dev/mapper/os-root'),
            mock.call('swap', '', '', '/dev/mapper/os-swap'),
            mock.call('xfs', '', '', '/dev/mapper/image-glance')]
        self.assertEqual(mock_fu_mf_expected_calls, mock_fu_mf.call_args_list)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(utils, 'render_and_save')
    @mock.patch.object(hu, 'list_block_devices')
    def test_do_configdrive(self, mock_lbd, mock_u_ras, mock_u_e):
        """do_configdrive() should render cloud-init templates, build the ISO,
        and register it as an extra image bound to the configdrive partition."""
        mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        self.mgr.do_parsing()
        self.assertEqual(1, len(self.mgr.image_scheme.images))
        self.mgr.do_configdrive()
        # Templates are tried most-specific first, generic fallback last.
        mock_u_ras_expected_calls = [
            mock.call(CONF.nc_template_path,
                      ['cloud_config_pro_fi-le.jinja2',
                       'cloud_config_pro.jinja2',
                       'cloud_config_pro_fi.jinja2',
                       'cloud_config.jinja2'],
                      mock.ANY, '%s/%s' % (CONF.tmp_path, 'cloud_config.txt')),
            mock.call(CONF.nc_template_path,
                      ['boothook_pro_fi-le.jinja2',
                       'boothook_pro.jinja2',
                       'boothook_pro_fi.jinja2',
                       'boothook.jinja2'],
                      mock.ANY, '%s/%s' % (CONF.tmp_path, 'boothook.txt')),
            mock.call(CONF.nc_template_path,
                      ['meta-data_pro_fi-le.jinja2',
                       'meta-data_pro.jinja2',
                       'meta-data_pro_fi.jinja2',
                       'meta-data.jinja2'],
                      mock.ANY, '%s/%s' % (CONF.tmp_path, 'meta-data'))]
        self.assertEqual(mock_u_ras_expected_calls, mock_u_ras.call_args_list)
        # user-data is assembled from boothook + cloud-config, then an ISO is built.
        mock_u_e_expected_calls = [
            mock.call('write-mime-multipart',
                      '--output=%s' % ('%s/%s' % (CONF.tmp_path, 'user-data')),
                      '%s:text/cloud-boothook' % ('%s/%s' % (CONF.tmp_path,
                                                             'boothook.txt')),
                      '%s:text/cloud-config' % ('%s/%s' % (CONF.tmp_path,
                                                           'cloud_config.txt'))
                      ),
            mock.call('genisoimage', '-output', CONF.config_drive_path,
                      '-volid', 'cidata', '-joliet', '-rock',
                      '%s/%s' % (CONF.tmp_path, 'user-data'),
                      '%s/%s' % (CONF.tmp_path, 'meta-data'))]
        self.assertEqual(mock_u_e_expected_calls, mock_u_e.call_args_list)
        self.assertEqual(2, len(self.mgr.image_scheme.images))
        cf_drv_img = self.mgr.image_scheme.images[-1]
        self.assertEqual('file://%s' % CONF.config_drive_path, cf_drv_img.uri)
        self.assertEqual('/dev/sda7',
                         self.mgr.partition_scheme.configdrive_device())
        self.assertEqual('iso9660', cf_drv_img.format)
        self.assertEqual('raw', cf_drv_img.container)

    @mock.patch.object(partition.PartitionScheme, 'configdrive_device')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(utils, 'render_and_save')
    @mock.patch.object(hu, 'list_block_devices')
    def test_do_configdrive_no_configdrive_device(self, mock_lbd, mock_u_ras,
                                                  mock_u_e, mock_p_ps_cd):
        """A missing configdrive partition must raise WrongPartitionSchemeError."""
        mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        self.mgr.do_parsing()
        mock_p_ps_cd.return_value = None
        self.assertRaises(errors.WrongPartitionSchemeError,
                          self.mgr.do_configdrive)

    @mock.patch.object(au, 'GunzipStream')
    @mock.patch.object(au, 'LocalFile')
    @mock.patch.object(au, 'HttpUrl')
    @mock.patch.object(au, 'Chain')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(utils, 'render_and_save')
    @mock.patch.object(hu, 'list_block_devices')
    def test_do_copyimage(self, mock_lbd, mock_u_ras, mock_u_e, mock_au_c,
                          mock_au_h, mock_au_l, mock_au_g):
        """do_copyimage() should chain uri -> source -> (gunzip) -> target for
        every image; the configdrive image is read as a plain local file."""
        class FakeChain(object):
            # Records appended processors instead of streaming anything.
            # NOTE(review): class-level list is shared across instances;
            # harmless here since only one FakeChain is created.
            processors = []

            def append(self, thing):
                self.processors.append(thing)

            def process(self):
                pass
        mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        mock_au_c.return_value = FakeChain()
        self.mgr.do_parsing()
        self.mgr.do_configdrive()
        self.mgr.do_copyimage()
        imgs = self.mgr.image_scheme.images
        self.assertEqual(2, len(imgs))
        expected_processors_list = []
        # All but the last (configdrive) image come over HTTP, gzipped.
        for img in imgs[:-1]:
            expected_processors_list += [
                img.uri,
                au.HttpUrl,
                au.GunzipStream,
                img.target_device
            ]
        expected_processors_list += [
            imgs[-1].uri,
            au.LocalFile,
            imgs[-1].target_device
        ]
        self.assertEqual(expected_processors_list,
                         mock_au_c.return_value.processors)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
class BTree:
    """B-tree of minimum degree ``t`` that allows duplicate keys.

    Every node keeps its keys sorted and holds ``len(keys) + 1`` child
    slots; leaf children are represented by ``None`` slots. Nodes are
    split on the way down during insertion, so a full node is never
    descended into (single-pass CLRS-style insertion); deletion likewise
    rebalances (borrow from a sibling or merge) before recursing.
    """

    class Node:
        # keys: sorted list of keys stored in this node.
        # sons: len(keys) + 1 child slots; ``None`` entries mark a leaf.
        def __init__(self):
            self.sons = []
            self.keys = []

        def __repr__(self):
            return 'Node' + str(self.keys) + str(self.sons)

        def _lower_bound(self, key):
            """Binary-search the child index to descend into for ``key``.

            Returns the largest ``b`` with ``keys[b - 1] <= key`` (0 when
            ``key`` is smaller than every key), i.e. equal keys descend
            to the right.
            """
            b = 0
            e = len(self.sons) - 1
            while b < e:
                mid = (b + e + 1) // 2
                if mid == 0: # mid is never 0 actually
                    pass
                elif self.keys[mid - 1] <= key:
                    b = mid
                else:
                    e = mid - 1
            return b

    def __init__(self, t):
        # t is the minimum degree: non-root nodes hold t-1 .. 2t-1 keys.
        self.root = self.Node()
        self.t = t

    def _inorder(self, cur):
        """Yield the keys of the subtree rooted at ``cur`` in sorted order."""
        if cur == None: return
        for i, son in enumerate(cur.sons):
            if i > 0:
                yield cur.keys[i - 1]
            yield from self._inorder(son)

    def inorder(self):
        """Yield all keys of the tree in sorted order."""
        yield from self._inorder(self.root)

    def _preorder(self, cur):
        """Yield keys node-first: this node's keys, then each subtree."""
        if cur == None: return
        for key in cur.keys:
            yield key
        for son in cur.sons:
            yield from self._preorder(son)

    def preorder(self):
        """Yield all keys of the tree in preorder."""
        yield from self._preorder(self.root)

    def _split(self, node, parnode, pos):
        """Split the full ``node`` around its median key.

        ``pos`` is ``node``'s index in ``parnode.sons``. When ``parnode``
        is None, ``node`` is the root: a new root holding just the median
        is created and returned.
        """
        # root case
        if parnode is None:
            self.root = self.Node()
            left = self.Node()
            right = self.Node()
            left.keys = node.keys[:self.t - 1]
            right.keys = node.keys[self.t:]
            left.sons = node.sons[:self.t]
            right.sons = node.sons[self.t:]
            self.root.keys = [ node.keys[self.t - 1] ]
            self.root.sons = [left, right]
            return self.root
        else:
            left = self.Node()
            right = self.Node()
            left.keys = node.keys[:self.t - 1]
            right.keys = node.keys[self.t:]
            left.sons = node.sons[:self.t]
            right.sons = node.sons[self.t:]
            # Median key moves up into the parent at position ``pos``.
            parnode.keys = parnode.keys[:pos] + [ node.keys[self.t - 1] ] + parnode.keys[pos:]
            parnode.sons = parnode.sons[:pos] + [left, right] + parnode.sons[pos + 1:]

    def _insert(self, key, node, parnode):
        """Insert ``key`` below ``node``, splitting full nodes on the way down."""
        if node is None: return None
        # node is full, and must be root
        if len(node.keys) == 2 * self.t - 1:
            assert node == self.root
            node = self._split(node, parnode, -1)
            assert len(node.keys) == 1
            # to the right
            if node.keys[0] <= key:
                self._insert(key, node.sons[1], node)
            else:
                self._insert(key, node.sons[0], node)
            return
        # only possible for root at the beginning
        if len(node.sons) == 0:
            assert node == self.root
            node.sons.append(None)
            node.keys.append(key)
            node.sons.append(None)
            return
        pos = node._lower_bound(key)
        # we are in a leaf
        if node.sons[pos] is None:
            node.keys = node.keys[:pos] + [key] + node.keys[pos:]
            node.sons.append(None)
        else:
            # son is full, doing split from here
            if node.sons[pos] is not None and len(node.sons[pos].keys) == 2 * self.t - 1:
                self._split(node.sons[pos], node, pos)
                # go to right
                if node.keys[pos] <= key:
                    self._insert(key, node.sons[pos + 1], node)
                else:
                    self._insert(key, node.sons[pos], node)
            else:
                self._insert(key, node.sons[pos], node)

    def insert(self, key):
        """Insert ``key`` into the tree (duplicates are kept)."""
        self._insert(key, self.root, None)

    def _find(self, key, node):
        """Return a stored key equal to ``key`` below ``node``, or None."""
        if node is None or len(node.sons) == 0:
            return None
        pos = node._lower_bound(key)
        if pos >= 1 and node.keys[pos - 1] == key:
            return node.keys[pos - 1]
        else:
            return self._find(key, node.sons[pos])

    def find(self, key):
        """Return a stored key equal to ``key``, or None if absent."""
        return self._find(key, self.root)

    def _find_predecessor(self, key, node):
        """Return the maximum key in the subtree rooted at ``node``."""
        if node.sons[0] == None:
            return node.keys[-1]
        else:
            return self._find_predecessor(key, node.sons[-1])

    def _find_succesor(self, key, node):
        """Return the minimum key in the subtree rooted at ``node``. (sic: 'succesor')"""
        if node.sons[0] == None:
            return node.keys[0]
        else:
            return self._find_succesor(key, node.sons[0])

    def _delete_key_leaf(self, key, node, pos):
        """Remove ``keys[pos]`` (== key) from the leaf ``node``."""
        # condition for correctness of algorithm
        assert node == self.root or len(node.sons) >= self.t
        assert node.keys[pos] == key
        node.keys = node.keys[:pos] + node.keys[pos + 1:]
        node.sons.pop()

    def _merge_children_around_key(self, key, node, pos):
        """Merge ``sons[pos]``, ``keys[pos]`` and ``sons[pos + 1]`` into one child."""
        assert pos >= 0 and pos < len(node.sons) - 1
        y = self.Node()
        y.sons = node.sons[pos].sons + node.sons[pos + 1].sons
        y.keys = node.sons[pos].keys + [node.keys[pos]] + node.sons[pos + 1].keys
        node.keys = node.keys[:pos] + node.keys[pos + 1:]
        node.sons = node.sons[:pos] + [y] + node.sons[pos + 2:]

    def _move_node_from_left_child(self, node, pos):
        """Rotate a key from ``sons[pos - 1]`` through ``node`` into ``sons[pos]``."""
        assert pos > 0 and len(node.sons[pos - 1].keys) >= self.t
        node.sons[pos].keys = [node.keys[pos - 1] ] + node.sons[pos].keys
        node.sons[pos].sons = [ node.sons[pos - 1].sons[-1] ] + node.sons[pos].sons
        node.keys[pos - 1] = node.sons[pos - 1].keys[-1]
        node.sons[pos - 1].sons = node.sons[pos - 1].sons[:-1]
        node.sons[pos - 1].keys = node.sons[pos - 1].keys[:-1]

    def _move_node_from_right_child(self, node, pos):
        """Rotate a key from ``sons[pos + 1]`` through ``node`` into ``sons[pos]``."""
        assert pos < len(node.sons) - 1 and len(node.sons[pos + 1].keys) >= self.t
        node.sons[pos].keys = node.sons[pos].keys + [node.keys[pos] ]
        node.sons[pos].sons = node.sons[pos].sons + [ node.sons[pos + 1].sons[0] ]
        node.keys[pos] = node.sons[pos + 1].keys[0]
        node.sons[pos + 1].sons = node.sons[pos + 1].sons[1:]
        node.sons[pos + 1].keys = node.sons[pos + 1].keys[1:]

    def _fix_empty_root(self, node):
        """Collapse a keyless root (one child slot) after a merge; return the live node."""
        if node == self.root and len(node.sons) == 1:
            self.root = node.sons[0]
            return self.root
        else:
            return node

    def _delete(self, key, node):
        """Delete one occurrence of ``key`` from the subtree at ``node``, if present."""
        if node is None or len(node.sons) == 0: return
        pos = node._lower_bound(key)
        # the key to delete is here
        if pos > 0 and node.keys[pos - 1] == key:
            # this node is a leaf
            if node.sons[pos] is None:
                self._delete_key_leaf(key, node, pos - 1)
            # left child node has enough keys
            elif len(node.sons[pos - 1].keys) >= self.t:
                kp = self._find_predecessor(key, node.sons[pos - 1])
                node.keys[pos - 1] = kp
                self._delete(kp, node.sons[pos - 1])
            # right child node has enough keys
            elif len(node.sons[pos].keys) >= self.t:
                kp = self._find_succesor(key, node.sons[pos])
                node.keys[pos - 1] = kp
                self._delete(kp, node.sons[pos])
            # both children have minimal number of keys, must combine them
            else:
                self._merge_children_around_key(key, node, pos - 1)
                # here I should take care of missing root
                node = self._fix_empty_root(node)
                self._delete(key, node)
        else:
            # we are on a leave and haven't found the key, we have nothing to do
            if node.sons[pos] is None:
                pass
            # the amount of keys in the child is enough, simply recurse
            elif len(node.sons[pos].keys) >= self.t:
                self._delete(key, node.sons[pos])
            # we must push a key to the child
            else:
                # left sibbling has enough keys
                if pos > 0 and len(node.sons[pos - 1].keys) >= self.t:
                    self._move_node_from_left_child(node, pos)
                    self._delete(key, node.sons[pos])
                # right sibbling has enough keys
                elif pos < len(node.sons) - 1 and len(node.sons[pos + 1].keys) >= self.t:
                    self._move_node_from_right_child(node, pos)
                    self._delete(key, node.sons[pos])
                # must merge with one of sibblings
                else:
                    if pos > 0:
                        self._merge_children_around_key(key, node, pos - 1)
                        # here I should take care of missing root
                        node = self._fix_empty_root(node)
                        self._delete(key, node)
                    elif pos < len(node.sons) - 1:
                        self._merge_children_around_key(key, node, pos)
                        # here I should take care of missing root
                        node = self._fix_empty_root(node)
                        self._delete(key, node)
                    # this shouldn't be possible
                    else:
                        assert False

    def delete(self, key):
        """Delete one occurrence of ``key`` (no-op when absent)."""
        self._delete(key, self.root)

    def _find_all(self, key, node, ans):
        """Append every stored key equal to ``key`` below ``node`` onto ``ans``.

        Two binary searches bound the leftmost/rightmost children that can
        contain ``key``; every child in that range is visited.
        """
        if node is None or len(node.sons) == 0: return
        b = 0
        e = len(node.sons) - 1
        while b < e:
            mid = (b + e + 1) // 2
            if mid == 0: # mid is never 0 actually
                pass
            elif node.keys[mid - 1] < key:
                b = mid
            else:
                e = mid - 1
        left = b
        b = 0
        e = len(node.sons) - 1
        while b < e:
            mid = (b + e + 1) // 2
            if mid == 0: # mid is never 0 actually
                pass
            elif node.keys[mid - 1] > key:
                e = mid - 1
            else:
                b = mid
        right = b
        # print(left, right, len(node.sons))
        for i in range(left, right + 1):
            self._find_all(key, node.sons[i], ans)
            if i < right:
                assert node.keys[i] == key
                ans.append(node.keys[i])

    def find_all(self, key):
        """Return a list with every stored occurrence of ``key``."""
        ans = []
        self._find_all(key, self.root, ans)
        return ans
def dummy_test0():
    """Smoke test: insert 9000 keys in random order, then look a few up.

    Prints ``T.find(i)`` for i in 0..8 (each should be the key itself).
    """
    import random  # local import; module-level `import random` appears later in the file
    T = BTree(6)
    rng = list(range(9000))
    # BUG FIX: the bare name `shuffle` was never imported anywhere in this
    # file (only `import random`), so calling this function raised NameError.
    random.shuffle(rng)
    for i in rng:
        T.insert(i)
    #print(T.root, '\n')
    for i in range(9):
        print(T.find(i), '\n')
def dummy_test1():
    """Exercise insert/delete on a degree-3 tree, printing the root as it evolves."""
    tree = BTree(3)
    for key in range(9):
        tree.insert(key)
    print(tree.root)
    # Delete two keys, showing the tree after each removal.
    for victim in (5, 4):
        tree.delete(victim)
        print(tree.root)
    # Re-grow the tree, including duplicate 3s.
    for key in (100, 101, 3, 3, 3):
        tree.insert(key)
    print(tree.root)
    tree.delete(1)
    print(tree.root)
def dummy_test2():
    """Insert duplicate keys and check find_all() reports every copy of 0."""
    tree = BTree(3)
    for _ in range(10):
        for key in (0, 1, 2, -1):
            tree.insert(key)
    print(tree.root)
    matches = tree.find_all(0)
    print(len(matches), matches)
import random
import collections
def map_test():
    '''
    Randomized differential test: compare the BTree against a dict of
    per-key counts (a multiset) under random insert/find/find_all/delete
    operations, across tree degrees 2..19.
    '''
    seed = random.randint(0, 1000)
    print('random seed %d' % seed)
    # seed = 195
    random.seed(seed)
    num_tests = 10000
    num_ops = 200
    debug = False
    for deg in range(2, 20):
        for test in range(num_tests):
            B = BTree(deg)
            M = collections.defaultdict(int)  # reference model: key -> multiplicity
            if debug: print('Beginning block of tests %d %d' % (deg, test))
            for _ in range(num_ops):
                if debug: print(B.root)
                # Operation mix: 1/3 insert, 1/3 find, 1/6 findall, 1/6 delete.
                prob = random.random()
                elem = random.randint(0, 10)
                if prob < 1 / 3: # insert
                    if debug: print('insert %d' % elem)
                    B.insert(elem)
                    M[elem] += 1
                elif prob < 1/3 + 1/3: # find
                    if debug: print('find %d' % elem)
                    r1 = (B.find(elem) != None)
                    r2 = (elem in M and M[elem] > 0)
                    if r1 != r2:
                        print(B.root)
                        print(r1, r2, elem)
                    assert r1 == r2
                elif prob < 1/3 + 1/3 + 1/6: # findall
                    if debug: print('findall %d' % elem)
                    r1 = len(B.find_all(elem))
                    if elem not in M:
                        r2 = 0
                    else:
                        r2 = M[elem]
                    if r1 != r2:
                        print(B.root)
                        print(r1, r2, elem)
                    assert r1 == r2
                else: # delete
                    if debug: print('delete %d' % elem)
                    if elem in M and M[elem] > 0:
                        M[elem] -= 1
                        B.delete(elem)
            if debug: print('Block finished correctly')
def walk_test():
    """Build a small tree and print it plus its preorder and inorder walks."""
    tree = BTree(3)
    for key in range(10):
        tree.insert(key)
    print(tree.root)
    for walk in (tree.preorder, tree.inorder):
        print(list(walk()))
def dummy_tests():
    # Only the traversal demo is wired in by default; the other
    # dummy_test*/map_test functions are invoked by hand.
    walk_test()
def main(args):
    '''
    Entry point: run the BTree smoke tests.

    ``args`` is sys.argv and is currently unused. Returns 0 as the
    process exit status.
    '''
    dummy_tests()
    return 0
if __name__ == '__main__':
    import sys
    # Raise the recursion limit when testing very deep trees (small degree):
    #sys.setrecursionlimit(10 ** 4)
    sys.exit(main(sys.argv))
| |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pre-training/Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import json
import logging
import math
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Callable, Optional
import datasets
import numpy as np
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from huggingface_hub import Repository
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
is_tensorboard_available,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.testing_utils import CaptureLogger
logger = logging.getLogger(__name__)
# All Flax causal-LM config classes; their model_type strings are the valid
# values for the --model_type flag when training from scratch.
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class TrainingArguments:
    """
    Command-line arguments controlling the training loop: batch sizes,
    AdamW/Adafactor hyper-parameters, checkpoint/eval cadence, and
    Hugging Face Hub upload settings.
    """

    output_dir: str = field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={
            "help": (
                "Overwrite the content of the output directory. "
                "Use this to continue training if output_dir points to a checkpoint directory."
            )
        },
    )
    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
    per_device_train_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
    )
    per_device_eval_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
    )
    learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
    weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
    adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
    adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
    adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
    adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
    num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
    warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
    logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
    save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
    eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
    seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
    push_to_hub: bool = field(
        default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
    )
    hub_model_id: str = field(
        default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
    )
    hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})

    def __post_init__(self):
        # Expand "~" in the output path once, before any file I/O happens.
        if self.output_dir is not None:
            self.output_dir = os.path.expanduser(self.output_dir)

    def to_dict(self):
        """
        Serializes this instance while replacing `Enum` members by their values (for
        JSON serialization support). It obfuscates the token values by removing
        their value.
        """
        d = asdict(self)
        for k, v in d.items():
            if isinstance(v, Enum):
                d[k] = v.value
            if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
                d[k] = [x.value for x in v]
            if k.endswith("_token"):
                # Never leak credentials into logs/serialized configs.
                d[k] = f"<{k.upper()}>"
        return d
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization."
            "Don't set if you want to train a model from scratch."
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    # NOTE(review): only `float32`, `float16` and `bfloat16` are meaningful here
    # per the help text; the value is not validated at parse time.
    dtype: Optional[str] = field(
        default="float32",
        metadata={
            "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Either ``dataset_name`` or one of ``train_file``/``validation_file`` must be
    supplied; local files must be csv, json or txt (validated in __post_init__).
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    # BUG FIX: this field was previously declared twice (the second, identical
    # declaration silently shadowed this one); the duplicate has been removed.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    block_size: Optional[int] = field(
        default=None,
        metadata={
            "help": "Optional input sequence length after tokenization. "
            "The training dataset will be truncated in block of this size for training. "
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    keep_linebreaks: bool = field(
        default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
    )

    def __post_init__(self):
        """Validate that a data source was given and file extensions are supported."""
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
class TrainState(train_state.TrainState):
    """Flax train state extended with a dropout PRNG key so it travels with the state."""

    # Per-host dropout key; train_step splits it each step for fresh randomness.
    dropout_rng: jnp.ndarray

    def replicate(self):
        """Replicate the state across local devices, sharding the dropout key per device."""
        return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
def data_loader(rng: jnp.ndarray, dataset: "Dataset", batch_size: int, shuffle: bool = False):
    """
    Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
    Shuffle batches if `shuffle` is `True`.

    Note: annotations fixed — ``jax.random.PRNGKey`` is a key *constructor*, not a
    type, so the rng parameter is annotated as ``jnp.ndarray``; ``Dataset`` is a
    string (lazy) annotation so the signature does not evaluate the project type
    eagerly. The final incomplete batch is dropped, so callers must size their
    step count as ``len(dataset) // batch_size``.
    """
    steps_per_epoch = len(dataset) // batch_size

    if shuffle:
        # Device-side permutation keyed on `rng`, so every host shuffles identically.
        batch_idx = jax.random.permutation(rng, len(dataset))
    else:
        batch_idx = jnp.arange(len(dataset))

    batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.
    batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))

    for idx in batch_idx:
        batch = dataset[idx]
        batch = {k: np.array(v) for k, v in batch.items()}

        yield batch
def write_train_metric(summary_writer, train_metrics, train_time, step):
    """Log accumulated training metrics (plus wall-clock train time) to TensorBoard."""
    summary_writer.scalar("train_time", train_time, step)

    # Stack the per-step metric dicts into one array per metric name.
    stacked = get_metrics(train_metrics)
    for key, vals in stacked.items():
        tag = f"train_{key}"
        # Back-fill one scalar per accumulated step so step numbering stays contiguous.
        for offset, val in enumerate(vals):
            summary_writer.scalar(tag, val, step - len(vals) + offset + 1)
def write_eval_metric(summary_writer, eval_metrics, step):
    """Log one scalar per evaluation metric to TensorBoard under an ``eval_`` prefix."""
    for name in eval_metrics:
        summary_writer.scalar(f"eval_{name}", eval_metrics[name], step)
def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function."""
    total_steps = (train_ds_size // train_batch_size) * num_train_epochs
    # Ramp 0 -> learning_rate over warmup, then decay learning_rate -> 0 for the rest.
    warmup = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=total_steps - num_warmup_steps
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Refuse to clobber an existing, non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        # Fix: a space was missing between the two concatenated message fragments.
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Setup logging, we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
    if jax.process_index() == 0:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # Set the verbosity to info of the Transformers logger (on main process only):
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Handle the repository creation
    if training_args.push_to_hub:
        if training_args.hub_model_id is None:
            # Default the repo name to the output directory's basename.
            repo_name = get_full_repo_name(
                Path(training_args.output_dir).absolute().name, token=training_args.hub_token
            )
        else:
            repo_name = training_args.hub_model_id
        repo = Repository(training_args.output_dir, clone_from=repo_name)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False
        )
        if "validation" not in dataset.keys():
            # No validation split published: carve one out of the head of `train`
            # and keep the remainder as the effective training split.
            dataset["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
            )
            dataset["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
            )
    else:
        data_files = {}
        dataset_args = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        # NOTE(review): this assumes `train_file` was given; it raises AttributeError
        # when only `validation_file` is supplied — confirm whether that combination
        # should be supported.
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            # The datasets "text" loader is the one that understands .txt files.
            extension = "text"
            dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
        dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, **dataset_args)

        if "validation" not in dataset.keys():
            dataset["validation"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
                **dataset_args,
            )
            dataset["train"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
                **dataset_args,
            )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = FlaxAutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
)
else:
model = FlaxAutoModelForCausalLM.from_config(
config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
)
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = dataset["train"].column_names
    else:
        column_names = dataset["validation"].column_names
    # Prefer a column literally named "text"; otherwise fall back to the first column.
    text_column_name = "text" if "text" in column_names else column_names[0]

    # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
    tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")

    def tokenize_function(examples):
        # Capture the tokenizer's log output so the "sequence too long" warning
        # can be followed up with a reassurance (inputs are chunked later).
        with CaptureLogger(tok_logger) as cl:
            output = tokenizer(examples[text_column_name])
        # clm input could be much much longer than block_size
        if "Token indices sequence length is longer than the" in cl.out:
            tok_logger.warning(
                "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
            )
        return output

    tokenized_datasets = dataset.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not data_args.overwrite_cache,
    )

    if data_args.block_size is None:
        # Default the chunk length to the tokenizer's limit, capped at 1024 when
        # the tokenizer reports an effectively unbounded model_max_length.
        block_size = tokenizer.model_max_length
        if block_size > config.max_position_embeddings:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --block_size xxx."
            )
            block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)
    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        if total_length >= block_size:
            total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        # Causal LM: labels are the inputs themselves; the one-token shift is
        # applied later inside loss_fn.
        result["labels"] = result["input_ids"].copy()
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    lm_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
    )
    if training_args.do_train:
        # NOTE(review): membership is checked on `tokenized_datasets` but the split
        # is read from `lm_datasets`; the two share split names here, but checking
        # the same object would be clearer — confirm intent.
        if "train" not in tokenized_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = lm_datasets["train"]
        if data_args.max_train_samples is not None:
            # Optional truncation for debugging / quick runs.
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in tokenized_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = lm_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    # Enable tensorboard only on the master node
    has_tensorboard = is_tensorboard_available()
    if has_tensorboard and jax.process_index() == 0:
        try:
            from flax.metrics.tensorboard import SummaryWriter

            summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
        except ImportError as ie:
            has_tensorboard = False
            logger.warning(
                f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
            )
    else:
        # NOTE(review): this branch also fires on non-zero process indices even
        # when tensorboard IS installed — the message is misleading there.
        logger.warning(
            "Unable to display metrics through TensorBoard because the package is not installed: "
            "Please run pip install tensorboard to enable."
        )
    # Initialize our training
    rng = jax.random.PRNGKey(training_args.seed)
    rng, dropout_rng = jax.random.split(rng)

    # Store some constant
    # NOTE(review): `train_dataset` is only bound when --do_train was passed;
    # the lines below raise NameError in an eval-only invocation — confirm.
    num_epochs = int(training_args.num_train_epochs)
    train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
    eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
    steps_per_epoch = len(train_dataset) // train_batch_size
    total_train_steps = steps_per_epoch * num_epochs

    # Create learning rate schedule
    linear_decay_lr_schedule_fn = create_learning_rate_fn(
        len(train_dataset),
        train_batch_size,
        training_args.num_train_epochs,
        training_args.warmup_steps,
        training_args.learning_rate,
    )

    # We use Optax's "masking" functionality to not apply weight decay
    # to bias and LayerNorm scale parameters. decay_mask_fn returns a
    # mask boolean with the same structure as the parameters.
    # The mask is True for parameters that should be decayed.
    # Note that this mask is specifically adapted for FlaxGPT2.
    # For other models, one should correct the layer norm parameter naming
    # accordingly.
    def decay_mask_fn(params):
        flat_params = traverse_util.flatten_dict(params)
        # Decay everything except biases and the GPT-2-named LayerNorm scales.
        flat_mask = {
            path: (path[-1] != "bias" and path[-2:] not in [("ln_1", "scale"), ("ln_2", "scale"), ("ln_f", "scale")])
            for path in flat_params
        }
        return traverse_util.unflatten_dict(flat_mask)

    # create adam optimizer
    if training_args.adafactor:
        # We use the default parameters here to initialize adafactor,
        # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
        optimizer = optax.adafactor(
            learning_rate=linear_decay_lr_schedule_fn,
        )
    else:
        optimizer = optax.adamw(
            learning_rate=linear_decay_lr_schedule_fn,
            b1=training_args.adam_beta1,
            b2=training_args.adam_beta2,
            eps=training_args.adam_epsilon,
            weight_decay=training_args.weight_decay,
            mask=decay_mask_fn,
        )

    # Setup train state
    state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer, dropout_rng=dropout_rng)

    def loss_fn(logits, labels):
        # Shift so every position predicts the NEXT token (causal LM objective).
        shift_logits = logits[..., :-1, :]
        shift_labels = labels[..., 1:]
        loss = optax.softmax_cross_entropy(shift_logits, onehot(shift_labels, shift_logits.shape[-1]))
        return loss.mean()
    # Define gradient update step fn
    def train_step(state, batch):
        # Fresh dropout key for this step; carry the successor key in the new state.
        dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)

        def compute_loss(params):
            labels = batch.pop("labels")
            logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
            loss = loss_fn(logits, labels)
            return loss

        grad_fn = jax.value_and_grad(compute_loss)
        loss, grad = grad_fn(state.params)
        # Average gradients across devices before applying the update.
        grad = jax.lax.pmean(grad, "batch")

        new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)

        metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
        metrics = jax.lax.pmean(metrics, axis_name="batch")

        return new_state, metrics

    # Define eval fn
    def eval_step(params, batch):
        labels = batch.pop("labels")

        # No dropout at eval time (train=False).
        logits = model(**batch, params=params, train=False)[0]
        loss = loss_fn(logits, labels)

        # summarize metrics
        metrics = {"loss": loss}
        metrics = jax.lax.pmean(metrics, axis_name="batch")
        return metrics

    # Create parallel version of the train and eval step
    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
    p_eval_step = jax.pmap(eval_step, "batch")

    # Replicate the train state on each device
    state = state.replicate()
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {num_epochs}")
    logger.info(f"  Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel & distributed) = {train_batch_size}")
    logger.info(f"  Total optimization steps = {total_train_steps}")

    train_time = 0
    train_metrics = []
    epochs = tqdm(range(num_epochs), desc="Epoch ... ", position=0)
    for epoch in epochs:
        # ======================== Training ================================
        train_start = time.time()

        # Create sampling rng
        rng, input_rng = jax.random.split(rng)

        # Generate an epoch by shuffling sampling indices from the train dataset
        train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True)
        steps_per_epoch = len(train_dataset) // train_batch_size
        # train
        for step in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
            batch = next(train_loader)
            # Shard the host batch across local devices for pmap.
            batch = shard(batch)
            state, train_metric = p_train_step(state, batch)
            train_metrics.append(train_metric)

            cur_step = epoch * (len(train_dataset) // train_batch_size) + step

            if cur_step % training_args.logging_steps == 0 and cur_step > 0:
                # Save metrics
                train_metric = unreplicate(train_metric)
                train_time += time.time() - train_start
                if has_tensorboard and jax.process_index() == 0:
                    write_train_metric(summary_writer, train_metrics, train_time, cur_step)

                epochs.write(
                    f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})"
                )

                # Reset the accumulator so each log window only covers new steps.
                train_metrics = []

            if cur_step % training_args.eval_steps == 0 and cur_step > 0:
                # ======================== Evaluating ==============================
                eval_metrics = []
                eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
                eval_steps = len(eval_dataset) // eval_batch_size
                for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
                    # Model forward
                    batch = next(eval_loader)
                    batch = shard(batch)
                    metrics = p_eval_step(state.params, batch)
                    eval_metrics.append(metrics)

                # normalize eval metrics
                eval_metrics = get_metrics(eval_metrics)
                eval_metrics = jax.tree_map(jnp.mean, eval_metrics)

                try:
                    eval_metrics["perplexity"] = math.exp(eval_metrics["loss"])
                except OverflowError:
                    # exp() of a very large loss overflows a float.
                    eval_metrics["perplexity"] = float("inf")

                # Print metrics and update progress bar
                desc = f"Step... ({cur_step} | Eval Loss: {eval_metrics['loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
                epochs.write(desc)
                epochs.desc = desc

                # Save metrics
                if has_tensorboard and jax.process_index() == 0:
                    write_eval_metric(summary_writer, eval_metrics, cur_step)

            if cur_step % training_args.save_steps == 0 and cur_step > 0:
                # save checkpoint after each epoch and push checkpoint to the hub
                if jax.process_index() == 0:
                    params = jax.device_get(unreplicate(state.params))
                    model.save_pretrained(training_args.output_dir, params=params)
                    tokenizer.save_pretrained(training_args.output_dir)
                    if training_args.push_to_hub:
                        repo.push_to_hub(commit_message=f"Saving weights and logs of step {cur_step}", blocking=False)

    # Eval after training
    if training_args.do_eval:
        # NOTE(review): `input_rng` is only bound inside the epoch loop above, so
        # this block raises NameError when num_train_epochs == 0 — confirm intent.
        eval_metrics = []
        eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
        eval_steps = len(eval_dataset) // eval_batch_size
        for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
            # Model forward
            batch = shard(next(eval_loader))
            metrics = p_eval_step(state.params, batch)
            eval_metrics.append(metrics)

        # normalize eval metrics
        eval_metrics = get_metrics(eval_metrics)
        # .item() pulls the scalars back to plain Python floats for JSON output.
        eval_metrics = jax.tree_map(lambda x: jnp.mean(x).item(), eval_metrics)

        try:
            eval_metrics["perplexity"] = math.exp(eval_metrics["loss"])
        except OverflowError:
            eval_metrics["perplexity"] = float("inf")

        if jax.process_index() == 0:
            eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()}
            path = os.path.join(training_args.output_dir, "eval_results.json")
            with open(path, "w") as f:
                json.dump(eval_metrics, f, indent=4, sort_keys=True)
# Script entry point.
if __name__ == "__main__":
    main()
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2015, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
IPv6 address logic.
"""
import struct as _struct
OPT_IMPORTS = False
# Check whether we need to use fallback code or not.
try:
import socket as _socket
# These might all generate exceptions on different platforms.
if not _socket.has_ipv6:
raise Exception('IPv6 disabled')
_socket.inet_pton
_socket.AF_INET6
from _socket import inet_pton as _inet_pton, \
inet_ntop as _inet_ntop, \
AF_INET6
OPT_IMPORTS = True
except Exception:
from netaddr.fbsocket import inet_pton as _inet_pton, \
inet_ntop as _inet_ntop, \
AF_INET6
from netaddr.core import AddrFormatError
from netaddr.strategy import \
valid_words as _valid_words, \
int_to_words as _int_to_words, \
words_to_int as _words_to_int, \
valid_bits as _valid_bits, \
bits_to_int as _bits_to_int, \
int_to_bits as _int_to_bits, \
valid_bin as _valid_bin, \
int_to_bin as _int_to_bin, \
bin_to_int as _bin_to_int
#: The width (in bits) of this address type.
width = 128
#: The individual word size (in bits) of this address type.
word_size = 16
#: The separator character used between each word.
word_sep = ':'
#: The AF_* constant value of this address type.
family = AF_INET6
#: A friendly string name address type.
family_name = 'IPv6'
#: The version of this address type.
version = 6
#: The number base to be used when interpreting word values as integers.
word_base = 16
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#: The number of words in this address type.
num_words = width // word_size
#: The maximum integer value for an individual word in this address type.
max_word = 2 ** word_size - 1
#: A dictionary mapping IPv6 CIDR prefixes to the equivalent netmasks.
prefix_to_netmask = dict(
[(i, max_int ^ (2 ** (width - i) - 1)) for i in range(0, width+1)])
#: A dictionary mapping IPv6 netmasks to their equivalent CIDR prefixes.
netmask_to_prefix = dict(
[(max_int ^ (2 ** (width - i) - 1), i) for i in range(0, width+1)])
#: A dictionary mapping IPv6 CIDR prefixes to the equivalent hostmasks.
prefix_to_hostmask = dict(
[(i, (2 ** (width - i) - 1)) for i in range(0, width+1)])
#: A dictionary mapping IPv6 hostmasks to their equivalent CIDR prefixes.
hostmask_to_prefix = dict(
[((2 ** (width - i) - 1), i) for i in range(0, width+1)])
#-----------------------------------------------------------------------------
# Dialect classes.
#-----------------------------------------------------------------------------
# A "dialect" bundles the formatting options consumed by int_to_str(): the
# per-word printf format and whether the '::' zero-run compaction (the
# inet_ntop path in int_to_str()) is applied.
class ipv6_compact(object):
    """An IPv6 dialect class - compact form."""
    #: The format string used to converting words into string values.
    word_fmt = '%x'

    #: Boolean flag indicating if IPv6 compaction algorithm should be used.
    compact = True

class ipv6_full(ipv6_compact):
    """An IPv6 dialect class - 'all zeroes' form (no '::' compaction)."""
    #: Boolean flag indicating if IPv6 compaction algorithm should be used.
    compact = False

class ipv6_verbose(ipv6_compact):
    """An IPv6 dialect class - extra wide 'all zeroes' form (zero-padded words)."""
    #: The format string used to converting words into string values.
    word_fmt = '%.4x'

    #: Boolean flag indicating if IPv6 compaction algorithm should be used.
    compact = False
def valid_str(addr, flags=0):
    """
    :param addr: An IPv6 address in presentation (string) format.

    :param flags: decides which rules are applied to the interpretation of the
        addr value. Future use - currently has no effect.

    :return: ``True`` if IPv6 address is valid, ``False`` otherwise.
    """
    if addr == '':
        raise AddrFormatError('Empty strings are not supported!')

    try:
        _inet_pton(AF_INET6, addr)
    # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only genuine parse failures should be reported as "invalid".
    except Exception:
        return False
    return True
def str_to_int(addr, flags=0):
    """
    :param addr: An IPv6 address in string form.

    :param flags: decides which rules are applied to the interpretation of the
        addr value. Future use - currently has no effect.

    :return: The equivalent unsigned integer for a given IPv6 address.
    """
    try:
        # Parse to the 16-byte packed form first, then widen to an integer.
        return packed_to_int(_inet_pton(AF_INET6, addr))
    except Exception:
        raise AddrFormatError('%r is not a valid IPv6 address string!' % addr)
def int_to_str(int_val, dialect=None):
    """
    :param int_val: An unsigned integer.

    :param dialect: (optional) a Python class defining formatting options.
        Defaults to :class:`ipv6_compact`.

    :return: The IPv6 presentation (string) format address equivalent to the
        unsigned integer provided.
    """
    chosen = ipv6_compact if dialect is None else dialect

    try:
        packed_int = int_to_packed(int_val)
        if chosen.compact:
            # Let inet_ntop (or the fallback) apply '::' zero-run compaction.
            return _inet_ntop(AF_INET6, packed_int)
        # Verbose path: render each 16-bit word with the dialect's own format.
        words = _struct.unpack('>8H', packed_int)
        return word_sep.join(chosen.word_fmt % word for word in words)
    except Exception:
        raise ValueError('%r is not a valid 128-bit unsigned integer!' \
            % int_val)
def int_to_arpa(int_val):
    """
    :param int_val: An unsigned integer.

    :return: The reverse DNS lookup for an IPv6 address in network byte
        order integer form.
    """
    # Render as 32 zero-padded hex digits, then reverse the nibble order.
    nibbles = int_to_str(int_val, ipv6_verbose).replace(':', '')
    # We won't support ip6.int here - see RFC 3152 for details.
    return '.'.join(list(reversed(nibbles)) + ['ip6', 'arpa', ''])
def int_to_packed(int_val):
    """
    :param int_val: the integer to be packed.

    :return: a packed string that is equivalent to value represented by an
        unsigned integer.
    """
    # Split the 128-bit value into four 32-bit words, most significant first.
    return _struct.pack('>4I', *int_to_words(int_val, 4, 32))
def packed_to_int(packed_int):
    """
    :param packed_int: a packed string containing an unsigned integer.
        It is assumed that string is packed in network byte order.

    :return: An unsigned integer equivalent to value of network address
        represented by packed binary string.
    """
    # Fold the four big-endian 32-bit words into one 128-bit integer.
    int_val = 0
    for word in _struct.unpack('>4I', packed_int):
        int_val = (int_val << 32) | word
    return int_val
def valid_words(words):
    """Return ``True`` if `words` is a valid sequence of 16-bit IPv6 words."""
    return _valid_words(words, word_size, num_words)

def int_to_words(int_val, num_words=None, word_size=None):
    """Split an unsigned integer into words; defaults to IPv6's 8 x 16-bit."""
    if num_words is None:
        # The parameters shadow the module constants, so fetch via globals().
        num_words = globals()['num_words']
    if word_size is None:
        word_size = globals()['word_size']
    return _int_to_words(int_val, word_size, num_words)

def words_to_int(words):
    """Combine a sequence of 16-bit words into a single unsigned integer."""
    return _words_to_int(words, word_size, num_words)

def valid_bits(bits):
    """Return ``True`` if `bits` is a valid word-separated 128-bit binary string."""
    return _valid_bits(bits, width, word_sep)

def bits_to_int(bits):
    """Convert a word-separated binary string to an unsigned integer."""
    return _bits_to_int(bits, width, word_sep)

def int_to_bits(int_val, word_sep=None):
    """Convert an unsigned integer to a word-separated binary string."""
    if word_sep is None:
        # The parameter shadows the module constant, so fetch via globals().
        word_sep = globals()['word_sep']
    return _int_to_bits(int_val, word_size, num_words, word_sep)

def valid_bin(bin_val):
    """Return ``True`` if `bin_val` is a valid binary string form for 128 bits."""
    return _valid_bin(bin_val, width)

def int_to_bin(int_val):
    """Convert an unsigned integer to its 128-bit binary string form."""
    return _int_to_bin(int_val, width)

def bin_to_int(bin_val):
    """Convert a 128-bit binary string form back to an unsigned integer."""
    return _bin_to_int(bin_val, width)
| |
"""
Support for recording details.
Component that records all events and state changes. Allows other components
to query this database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/recorder/
"""
import atexit
import json
import logging
import queue
import sqlite3
import threading
from datetime import date, datetime
import homeassistant.util.dt as dt_util
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL)
from homeassistant.core import Event, EventOrigin, State
from homeassistant.remote import JSONEncoder
DOMAIN = "recorder"
DB_FILE = 'home-assistant.db'
RETURN_ROWCOUNT = "rowcount"
RETURN_LASTROWID = "lastrowid"
RETURN_ONE_ROW = "one_row"
_INSTANCE = None
_LOGGER = logging.getLogger(__name__)
def query(sql_query, arguments=None):
    """Query the database."""
    # Delegate to the module-level Recorder singleton (created by setup()).
    _verify_instance()

    return _INSTANCE.query(sql_query, arguments)
def query_states(state_query, arguments=None):
    """Query the database and return a list of states."""
    states = []
    for row in query(state_query, arguments):
        state = row_to_state(row)
        # row_to_state returns None for rows that fail to convert; drop those.
        if state is not None:
            states.append(state)
    return states
def query_events(event_query, arguments=None):
    """Query the database and return a list of events."""
    events = []
    for row in query(event_query, arguments):
        event = row_to_event(row)
        # row_to_event returns None for rows that fail to convert; drop those.
        if event is not None:
            events.append(event)
    return events
def row_to_state(row):
    """Convert a database row to a state, or None if conversion fails."""
    try:
        attributes = json.loads(row[3])
        return State(
            row[1], row[2], attributes,
            dt_util.utc_from_timestamp(row[4]),
            dt_util.utc_from_timestamp(row[5]))
    except ValueError:
        # When json.loads fails
        _LOGGER.exception("Error converting row to state: %s", row)
        return None
def row_to_event(row):
    """Convert a database row to an event, or None if conversion fails."""
    try:
        data = json.loads(row[2])
        return Event(row[1], data, EventOrigin(row[3]),
                     dt_util.utc_from_timestamp(row[5]))
    except ValueError:
        # When json.loads fails
        _LOGGER.exception("Error converting row to event: %s", row)
        return None
def run_information(point_in_time=None):
    """Return information about current run.

    There is also the run that covers point_in_time.
    """
    _verify_instance()

    if point_in_time is None or point_in_time > _INSTANCE.recording_start:
        # Requested time falls inside the currently active run.
        return RecorderRun()

    # Otherwise look up the historical run whose [start, end) covers the time.
    run = _INSTANCE.query(
        "SELECT * FROM recorder_runs WHERE start<? AND END>?",
        (point_in_time, point_in_time), return_value=RETURN_ONE_ROW)

    return RecorderRun(run) if run else None
def setup(hass, config):
    """Setup the recorder."""
    # pylint: disable=global-statement
    global _INSTANCE

    # The Recorder thread starts itself once EVENT_HOMEASSISTANT_START fires.
    _INSTANCE = Recorder(hass)

    return True
class RecorderRun(object):
    """Representation of a recorder run."""

    def __init__(self, row=None):
        """Initialize the recorder run."""
        self.end = None

        if row is None:
            # No DB row given: this object represents the currently active run.
            self.start = _INSTANCE.recording_start
            self.closed_incorrect = False
        else:
            # Row layout: [?, start, end, closed_incorrect] — timestamps stored
            # as UTC epoch seconds; `end` is NULL while a run is still open.
            self.start = dt_util.utc_from_timestamp(row[1])

            if row[2] is not None:
                self.end = dt_util.utc_from_timestamp(row[2])

            self.closed_incorrect = bool(row[3])

    def entity_ids(self, point_in_time=None):
        """Return the entity ids that existed in this run.

        Specify point_in_time if you want to know which existed at that point
        in time inside the run.
        """
        where = self.where_after_start_run
        where_data = []

        if point_in_time is not None or self.end is not None:
            # Bound the window at the requested time, or at the run's end.
            where += "AND created < ? "
            where_data.append(point_in_time or self.end)

        return [row[0] for row in query(
            "SELECT entity_id FROM states WHERE {}"
            "GROUP BY entity_id".format(where), where_data)]

    @property
    def where_after_start_run(self):
        """Return SQL WHERE clause.

        Selection of the rows created after the start of the run.
        """
        # Trailing space lets callers append further "AND ..." conditions.
        return "created >= {} ".format(_adapt_datetime(self.start))

    @property
    def where_limit_to_run(self):
        """Return a SQL WHERE clause.

        For limiting the results to this run.
        """
        where = self.where_after_start_run

        if self.end is not None:
            where += "AND created < {} ".format(_adapt_datetime(self.end))

        return where
class Recorder(threading.Thread):
"""A threaded recorder class."""
    def __init__(self, hass):
        """Initialize the recorder."""
        threading.Thread.__init__(self)

        self.hass = hass
        self.conn = None
        # Events are handed over from the bus to the worker thread via this queue.
        self.queue = queue.Queue()
        # Sentinel object; seeing it on the queue tells run() to shut down.
        self.quit_object = object()
        self.lock = threading.Lock()
        self.recording_start = dt_util.utcnow()
        self.utc_offset = dt_util.now().utcoffset().total_seconds()

        def start_recording(event):
            """Start recording."""
            self.start()

        hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_recording)
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self.shutdown)
        hass.bus.listen(MATCH_ALL, self.event_listener)
    def run(self):
        """Start processing events to save."""
        self._setup_connection()
        self._setup_run()

        while True:
            event = self.queue.get()

            if event == self.quit_object:
                # Shutdown sentinel received: close out the run and the DB.
                self._close_run()
                self._close_connection()
                self.queue.task_done()
                return
            elif event.event_type == EVENT_TIME_CHANGED:
                # Time ticks fire constantly; not worth persisting.
                self.queue.task_done()
                continue

            event_id = self.record_event(event)

            if event.event_type == EVENT_STATE_CHANGED:
                # Also persist the new state, linked to its source event row.
                self.record_state(
                    event.data['entity_id'], event.data.get('new_state'),
                    event_id)

            self.queue.task_done()
def event_listener(self, event):
"""Listen for new events and put them in the process queue."""
self.queue.put(event)
def shutdown(self, event):
"""Tell the recorder to shut down."""
self.queue.put(self.quit_object)
self.block_till_done()
def record_state(self, entity_id, state, event_id):
"""Save a state to the database."""
now = dt_util.utcnow()
# State got deleted
if state is None:
state_state = ''
state_domain = ''
state_attr = '{}'
last_changed = last_updated = now
else:
state_domain = state.domain
state_state = state.state
state_attr = json.dumps(dict(state.attributes))
last_changed = state.last_changed
last_updated = state.last_updated
info = (
entity_id, state_domain, state_state, state_attr,
last_changed, last_updated,
now, self.utc_offset, event_id)
self.query(
"""
INSERT INTO states (
entity_id, domain, state, attributes, last_changed, last_updated,
created, utc_offset, event_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
info)
def record_event(self, event):
"""Save an event to the database."""
info = (
event.event_type, json.dumps(event.data, cls=JSONEncoder),
str(event.origin), dt_util.utcnow(), event.time_fired,
self.utc_offset
)
return self.query(
"INSERT INTO events ("
"event_type, event_data, origin, created, time_fired, utc_offset"
") VALUES (?, ?, ?, ?, ?, ?)", info, RETURN_LASTROWID)
def query(self, sql_query, data=None, return_value=None):
"""Query the database."""
try:
with self.conn, self.lock:
_LOGGER.debug("Running query %s", sql_query)
cur = self.conn.cursor()
if data is not None:
cur.execute(sql_query, data)
else:
cur.execute(sql_query)
if return_value == RETURN_ROWCOUNT:
return cur.rowcount
elif return_value == RETURN_LASTROWID:
return cur.lastrowid
elif return_value == RETURN_ONE_ROW:
return cur.fetchone()
else:
return cur.fetchall()
except (sqlite3.IntegrityError, sqlite3.OperationalError,
sqlite3.ProgrammingError):
_LOGGER.exception(
"Error querying the database using: %s", sql_query)
return []
def block_till_done(self):
"""Block till all events processed."""
self.queue.join()
def _setup_connection(self):
"""Ensure database is ready to fly."""
db_path = self.hass.config.path(DB_FILE)
self.conn = sqlite3.connect(db_path, check_same_thread=False)
self.conn.row_factory = sqlite3.Row
# Make sure the database is closed whenever Python exits
# without the STOP event being fired.
atexit.register(self._close_connection)
# Have datetime objects be saved as integers.
sqlite3.register_adapter(date, _adapt_datetime)
sqlite3.register_adapter(datetime, _adapt_datetime)
# Validate we are on the correct schema or that we have to migrate.
cur = self.conn.cursor()
def save_migration(migration_id):
"""Save and commit a migration to the database."""
cur.execute('INSERT INTO schema_version VALUES (?, ?)',
(migration_id, dt_util.utcnow()))
self.conn.commit()
_LOGGER.info("Database migrated to version %d", migration_id)
try:
cur.execute('SELECT max(migration_id) FROM schema_version;')
migration_id = cur.fetchone()[0] or 0
except sqlite3.OperationalError:
# The table does not exist.
cur.execute('CREATE TABLE schema_version ('
'migration_id integer primary key, performed integer)')
migration_id = 0
if migration_id < 1:
cur.execute("""
CREATE TABLE recorder_runs (
run_id integer primary key,
start integer,
end integer,
closed_incorrect integer default 0,
created integer)
""")
cur.execute("""
CREATE TABLE events (
event_id integer primary key,
event_type text,
event_data text,
origin text,
created integer)
""")
cur.execute(
'CREATE INDEX events__event_type ON events(event_type)')
cur.execute("""
CREATE TABLE states (
state_id integer primary key,
entity_id text,
state text,
attributes text,
last_changed integer,
last_updated integer,
created integer)
""")
cur.execute('CREATE INDEX states__entity_id ON states(entity_id)')
save_migration(1)
if migration_id < 2:
cur.execute("""
ALTER TABLE events
ADD COLUMN time_fired integer
""")
cur.execute('UPDATE events SET time_fired=created')
save_migration(2)
if migration_id < 3:
utc_offset = self.utc_offset
cur.execute("""
ALTER TABLE recorder_runs
ADD COLUMN utc_offset integer
""")
cur.execute("""
ALTER TABLE events
ADD COLUMN utc_offset integer
""")
cur.execute("""
ALTER TABLE states
ADD COLUMN utc_offset integer
""")
cur.execute("UPDATE recorder_runs SET utc_offset=?", [utc_offset])
cur.execute("UPDATE events SET utc_offset=?", [utc_offset])
cur.execute("UPDATE states SET utc_offset=?", [utc_offset])
save_migration(3)
if migration_id < 4:
# We had a bug where we did not save utc offset for recorder runs.
cur.execute(
"""UPDATE recorder_runs SET utc_offset=?
WHERE utc_offset IS NULL""", [self.utc_offset])
cur.execute("""
ALTER TABLE states
ADD COLUMN event_id integer
""")
save_migration(4)
if migration_id < 5:
# Add domain so that thermostat graphs look right.
try:
cur.execute("""
ALTER TABLE states
ADD COLUMN domain text
""")
except sqlite3.OperationalError:
# We had a bug in this migration for a while on dev.
# Without this, dev-users will have to throw away their db.
pass
# TravisCI has Python compiled against an old version of SQLite3
# which misses the instr method.
self.conn.create_function(
"instr", 2,
lambda string, substring: string.find(substring) + 1)
# Populate domain with defaults.
cur.execute("""
UPDATE states
set domain=substr(entity_id, 0, instr(entity_id, '.'))
""")
# Add indexes we are going to use a lot on selects.
cur.execute("""
CREATE INDEX states__state_changes ON
states (last_changed, last_updated, entity_id)""")
cur.execute("""
CREATE INDEX states__significant_changes ON
states (domain, last_updated, entity_id)""")
save_migration(5)
def _close_connection(self):
"""Close connection to the database."""
_LOGGER.info("Closing database")
atexit.unregister(self._close_connection)
self.conn.close()
def _setup_run(self):
"""Log the start of the current run."""
if self.query("""UPDATE recorder_runs SET end=?, closed_incorrect=1
WHERE end IS NULL""", (self.recording_start, ),
return_value=RETURN_ROWCOUNT):
_LOGGER.warning("Found unfinished sessions")
self.query(
"""INSERT INTO recorder_runs (start, created, utc_offset)
VALUES (?, ?, ?)""",
(self.recording_start, dt_util.utcnow(), self.utc_offset))
def _close_run(self):
"""Save end time for current run."""
self.query(
"UPDATE recorder_runs SET end=? WHERE start=?",
(dt_util.utcnow(), self.recording_start))
def _adapt_datetime(datetimestamp):
    """Convert a datetime to a POSIX timestamp for storage in the DB.

    Sub-second precision is dropped before conversion.
    """
    truncated = datetimestamp.replace(microsecond=0)
    return dt_util.as_utc(truncated).timestamp()
def _verify_instance():
    """Raise RuntimeError if the global recorder instance is missing."""
    if _INSTANCE is not None:
        return
    raise RuntimeError("Recorder not initialized.")
| |
"""Support ezviz camera devices."""
from __future__ import annotations
import logging
from pyezviz.exceptions import HTTPError, InvalidHost, PyEzvizError
import voluptuous as vol
from homeassistant.components import ffmpeg
from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.components.ffmpeg import get_ffmpeg_manager
from homeassistant.config_entries import (
SOURCE_DISCOVERY,
SOURCE_IGNORE,
SOURCE_IMPORT,
ConfigEntry,
)
from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import (
ATTR_DIRECTION,
ATTR_ENABLE,
ATTR_LEVEL,
ATTR_SERIAL,
ATTR_SPEED,
ATTR_TYPE,
CONF_CAMERAS,
CONF_FFMPEG_ARGUMENTS,
DATA_COORDINATOR,
DEFAULT_CAMERA_USERNAME,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_RTSP_PORT,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_UP,
DOMAIN,
SERVICE_ALARM_SOUND,
SERVICE_ALARM_TRIGER,
SERVICE_DETECTION_SENSITIVITY,
SERVICE_PTZ,
SERVICE_WAKE_DEVICE,
)
from .coordinator import EzvizDataUpdateCoordinator
from .entity import EzvizEntity
# Per-camera RTSP credential override in the legacy YAML config.
CAMERA_SCHEMA = vol.Schema(
    {vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)

# Legacy YAML platform schema; only used to import into a config entry
# (see async_setup_platform below).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_CAMERAS, default={}): {cv.string: CAMERA_SCHEMA},
    }
)

_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: entity_platform.AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up a Ezviz IP Camera from platform config.

    Deprecated path: the YAML configuration is converted into config-entry
    import flows and no entities are added here directly.
    """
    _LOGGER.warning(
        "Loading ezviz via platform config is deprecated, it will be automatically imported. Please remove it afterwards"
    )

    # Check if entry config exists and skips import if it does.
    if hass.config_entries.async_entries(DOMAIN):
        return

    # Check if importing camera account.
    if CONF_CAMERAS in config:
        cameras_conf = config[CONF_CAMERAS]
        # Start one import flow per camera-specific credential set.
        for serial, camera in cameras_conf.items():
            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN,
                    context={"source": SOURCE_IMPORT},
                    data={
                        ATTR_SERIAL: serial,
                        CONF_USERNAME: camera[CONF_USERNAME],
                        CONF_PASSWORD: camera[CONF_PASSWORD],
                    },
                )
            )

    # Check if importing main ezviz cloud account.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_IMPORT},
            data=config,
        )
    )
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: entity_platform.AddEntitiesCallback,
) -> None:
    """Set up Ezviz cameras based on a config entry.

    Creates one EzvizCamera entity per camera known to the coordinator and
    registers the entity services (PTZ, alarms, wake, sensitivity).
    """
    coordinator: EzvizDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
        DATA_COORDINATOR
    ]

    camera_entities = []

    for camera, value in coordinator.data.items():
        # A camera-specific entry (matched by serial) holds the RTSP
        # credentials and ffmpeg options for this device, if configured.
        camera_rtsp_entry = [
            item
            for item in hass.config_entries.async_entries(DOMAIN)
            if item.unique_id == camera and item.source != SOURCE_IGNORE
        ]

        # There seem to be a bug related to localRtspPort in Ezviz API.
        local_rtsp_port = (
            value["local_rtsp_port"]
            if value["local_rtsp_port"] != 0
            else DEFAULT_RTSP_PORT
        )

        if camera_rtsp_entry:
            ffmpeg_arguments = camera_rtsp_entry[0].options[CONF_FFMPEG_ARGUMENTS]
            camera_username = camera_rtsp_entry[0].data[CONF_USERNAME]
            camera_password = camera_rtsp_entry[0].data[CONF_PASSWORD]

            camera_rtsp_stream = f"rtsp://{camera_username}:{camera_password}@{value['local_ip']}:{local_rtsp_port}{ffmpeg_arguments}"
            _LOGGER.debug(
                "Configuring Camera %s with ip: %s rtsp port: %s ffmpeg arguments: %s",
                camera,
                value["local_ip"],
                local_rtsp_port,
                ffmpeg_arguments,
            )

        else:
            # No per-camera entry yet: start a discovery flow so the user
            # can supply credentials, and fall back to defaults (no stream
            # password means streaming stays disabled).
            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN,
                    context={"source": SOURCE_DISCOVERY},
                    data={
                        ATTR_SERIAL: camera,
                        CONF_IP_ADDRESS: value["local_ip"],
                    },
                )
            )

            _LOGGER.warning(
                "Found camera with serial %s without configuration. Please go to integration to complete setup",
                camera,
            )

            ffmpeg_arguments = DEFAULT_FFMPEG_ARGUMENTS
            camera_username = DEFAULT_CAMERA_USERNAME
            camera_password = None
            camera_rtsp_stream = ""

        camera_entities.append(
            EzvizCamera(
                hass,
                coordinator,
                camera,
                camera_username,
                camera_password,
                camera_rtsp_stream,
                local_rtsp_port,
                ffmpeg_arguments,
            )
        )

    async_add_entities(camera_entities)

    # Register camera entity services on this platform.
    platform = entity_platform.async_get_current_platform()

    platform.async_register_entity_service(
        SERVICE_PTZ,
        {
            vol.Required(ATTR_DIRECTION): vol.In(
                [DIR_UP, DIR_DOWN, DIR_LEFT, DIR_RIGHT]
            ),
            vol.Required(ATTR_SPEED): cv.positive_int,
        },
        "perform_ptz",
    )

    platform.async_register_entity_service(
        SERVICE_ALARM_TRIGER,
        {
            vol.Required(ATTR_ENABLE): cv.positive_int,
        },
        "perform_sound_alarm",
    )

    platform.async_register_entity_service(
        SERVICE_WAKE_DEVICE, {}, "perform_wake_device"
    )

    platform.async_register_entity_service(
        SERVICE_ALARM_SOUND,
        {vol.Required(ATTR_LEVEL): cv.positive_int},
        "perform_alarm_sound",
    )

    platform.async_register_entity_service(
        SERVICE_DETECTION_SENSITIVITY,
        {
            vol.Required(ATTR_LEVEL): cv.positive_int,
            vol.Required(ATTR_TYPE): cv.positive_int,
        },
        "perform_set_alarm_detection_sensibility",
    )
class EzvizCamera(EzvizEntity, Camera):
    """An implementation of a Ezviz security camera."""

    coordinator: EzvizDataUpdateCoordinator

    def __init__(
        self,
        hass: HomeAssistant,
        coordinator: EzvizDataUpdateCoordinator,
        serial: str,
        camera_username: str,
        camera_password: str | None,
        camera_rtsp_stream: str | None,
        local_rtsp_port: int,
        ffmpeg_arguments: str | None,
    ) -> None:
        """Initialize a Ezviz security camera.

        A None camera_password disables RTSP streaming (see
        supported_features / stream_source).
        """
        super().__init__(coordinator, serial)
        Camera.__init__(self)
        self._username = camera_username
        self._password = camera_password
        self._rtsp_stream = camera_rtsp_stream
        self._local_rtsp_port = local_rtsp_port
        self._ffmpeg_arguments = ffmpeg_arguments
        self._ffmpeg = get_ffmpeg_manager(hass)
        self._attr_unique_id = serial
        self._attr_name = self.data["name"]

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        # NOTE(review): status 2 appears to denote an unavailable/offline
        # device - confirm against the pyezviz API.
        return self.data["status"] != 2

    @property
    def supported_features(self) -> int:
        """Return supported features."""
        # Streaming requires RTSP credentials.
        if self._password:
            return SUPPORT_STREAM
        return 0

    @property
    def is_on(self) -> bool:
        """Return true if on."""
        return bool(self.data["status"])

    @property
    def is_recording(self) -> bool:
        """Return true if the device is recording."""
        return self.data["alarm_notify"]

    @property
    def motion_detection_enabled(self) -> bool:
        """Camera Motion Detection Status."""
        return self.data["alarm_notify"]

    def enable_motion_detection(self) -> None:
        """Enable motion detection in camera."""
        try:
            self.coordinator.ezviz_client.set_camera_defence(self._serial, 1)

        except InvalidHost as err:
            raise InvalidHost("Error enabling motion detection") from err

    def disable_motion_detection(self) -> None:
        """Disable motion detection."""
        try:
            self.coordinator.ezviz_client.set_camera_defence(self._serial, 0)

        except InvalidHost as err:
            raise InvalidHost("Error disabling motion detection") from err

    async def async_camera_image(
        self, width: int | None = None, height: int | None = None
    ) -> bytes | None:
        """Return a frame from the camera stream."""
        if self._rtsp_stream is None:
            return None
        return await ffmpeg.async_get_image(
            self.hass, self._rtsp_stream, width=width, height=height
        )

    async def stream_source(self) -> str | None:
        """Return the stream source.

        Rebuilds the RTSP URL from current coordinator data so IP changes
        are picked up; returns None when no password is configured.
        """
        if self._password is None:
            return None

        local_ip = self.data["local_ip"]
        self._rtsp_stream = (
            f"rtsp://{self._username}:{self._password}@"
            f"{local_ip}:{self._local_rtsp_port}{self._ffmpeg_arguments}"
        )
        _LOGGER.debug(
            "Configuring Camera %s with ip: %s rtsp port: %s ffmpeg arguments: %s",
            self._serial,
            local_ip,
            self._local_rtsp_port,
            self._ffmpeg_arguments,
        )

        return self._rtsp_stream

    def perform_ptz(self, direction: str, speed: int) -> None:
        """Perform a PTZ action on the camera."""
        try:
            # Issue a START followed immediately by a STOP, i.e. one nudge
            # in the requested direction.
            self.coordinator.ezviz_client.ptz_control(
                str(direction).upper(), self._serial, "START", speed
            )
            self.coordinator.ezviz_client.ptz_control(
                str(direction).upper(), self._serial, "STOP", speed
            )

        except HTTPError as err:
            raise HTTPError("Cannot perform PTZ") from err

    def perform_sound_alarm(self, enable: int) -> None:
        """Sound the alarm on a camera."""
        try:
            self.coordinator.ezviz_client.sound_alarm(self._serial, enable)

        except HTTPError as err:
            raise HTTPError("Cannot sound alarm") from err

    def perform_wake_device(self) -> None:
        """Basically wakes the camera by querying the device."""
        try:
            self.coordinator.ezviz_client.get_detection_sensibility(self._serial)

        except (HTTPError, PyEzvizError) as err:
            raise PyEzvizError("Cannot wake device") from err

    def perform_alarm_sound(self, level: int) -> None:
        """Enable/Disable movement sound alarm."""
        try:
            self.coordinator.ezviz_client.alarm_sound(self._serial, level, 1)

        except HTTPError as err:
            raise HTTPError(
                "Cannot set alarm sound level for on movement detected"
            ) from err

    def perform_set_alarm_detection_sensibility(
        self, level: int, type_value: int
    ) -> None:
        """Set camera detection sensibility level service."""
        try:
            self.coordinator.ezviz_client.detection_sensibility(
                self._serial, level, type_value
            )

        except (HTTPError, PyEzvizError) as err:
            raise PyEzvizError("Cannot set detection sensitivity level") from err
| |
##########################################
# File: shard.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import numpy as np
from scipy.linalg import norm
# LineSegment
class LineSegment(object):
    """A directed 2D line segment `x0 + t * l * m` for t in [0, 1].

    `m` is stored as a unit vector; its original length is kept in `l`.
    """

    def __init__(self, m, x0):
        # The `m` setter normalises the direction and records its length.
        self.m = m
        self.x0 = x0

    @classmethod
    def from_points(cls, *args):
        """Build a segment from two points p -> q, given as a pair or two args."""
        if len(args) == 1:
            p, q = args[0]
        else:
            p, q = args
        return cls(q - p, p)

    @property
    def points(self):
        """Return the (2, 2) array [start; end] of the segment's endpoints."""
        return np.r_['0,2', self.x0, self.x0 + self.l * self.m]

    @property
    def m(self):
        """Unit direction vector."""
        return self._m

    @m.setter
    def m(self, m):
        m = np.asarray(m)
        norm_m = norm(m)
        if norm_m == 0.0:
            # A zero-length direction is degenerate.
            raise ZeroDivisionError
        self._m = m / norm_m
        self.l = norm_m

    @property
    def n(self):
        """Unit normal: the direction rotated by +90 degrees."""
        return np.r_[-self.m[1], self.m[0]]

    def __call__(self, t):
        """Evaluate the segment at parameter(s) `t`; returns an (n, 2) array."""
        t = np.atleast_1d(t)
        return self.m * self.l * t[:, np.newaxis] + self.x0

    # closest_preimage
    def closest_preimage(self, Q):
        """Return parameters (clipped to [0, 1]) of the segment points
        closest to each row of `Q`."""
        Q = np.atleast_2d(Q)
        u = np.atleast_1d(np.dot(Q - self.x0, self.m) / self.l)
        u[u < 0.0] = 0.0
        u[u > 1.0] = 1.0
        return u

    # Cache of coordinate grids keyed by integral-domain shape, shared by
    # all instances.
    _dt_integral_domain = {}

    @classmethod
    def _integral_domain(cls, integral_domain):
        """Return (and cache) the (w*h, 2) pixel-centre coordinates of the
        domain, scanned row-wise from top (max y) to bottom."""
        k = tuple(integral_domain)
        try:
            X = cls._dt_integral_domain[k]
        except KeyError:
            slice_ = tuple(slice(None, d) for d in integral_domain[::-1])
            # Materialise explicitly: on Python 3 `map` returns an iterator,
            # which np.transpose cannot consume.
            G = [np.ravel(g) for g in np.mgrid[slice_]]
            X = np.transpose(G)
            X = np.fliplr(X)
            X[:, 1] *= -1
            X[:, 1] += integral_domain[1]
            cls._dt_integral_domain[k] = X
        return X

    # dt
    def dt(self, integral_domain, outside_left=None):
        """Distance transform of the segment over `integral_domain`.

        If `outside_left` is given, distances are signed according to which
        side of the segment each pixel falls on.
        """
        # create `X` as scanning `integral_domain` along rows, from
        # "top" (max y) to "bottom" (min y)
        X = self._integral_domain(integral_domain)
        u = self.closest_preimage(X)
        Y = self(u)
        r = X - Y
        d = np.sqrt(np.sum(r**2, axis=1))
        if outside_left is not None:
            # Map the side test {False, True} -> {-1.0, +1.0}.
            s = np.require((np.dot(r, self.n) > 0),
                           dtype=np.float64)
            s *= 2.0
            s -= 1.0
            if not outside_left:
                s *= -1.0
            d *= s
        return d.reshape(integral_domain[::-1])

    def __repr__(self):
        # `range` (not Python 2's `xrange`) so repr works on Python 3 too.
        fmt = '[%s]' % ', '.join('%.7g' for i in range(len(self.m)))
        return "%s(%s, %s, %.7g)" % (self.__class__.__name__,
                                     fmt % tuple(self.m),
                                     fmt % tuple(self.x0),
                                     self.l)
# Polygon
class Polygon(object):
    """A closed polygon represented as an ordered list of LineSegment edges."""

    def __init__(self, lines):
        self.lines = list(lines)

    # is_closed
    def is_closed(self, tolerance):
        """Return True if each edge starts where the previous one ends
        (to within `tolerance`)."""
        for i, line in enumerate(self.lines):
            prev_line = self.lines[i - 1]
            end = prev_line.points[1]
            start = line.points[0]
            if norm(end - start) > tolerance:
                return False
        return True

    @classmethod
    def from_points(cls, P, *args, **kwargs):
        """Build a polygon joining consecutive rows of `P`, wrapping around.

        Raises ValueError for fewer than 3 points.
        """
        if len(P) < 3:
            # Use len(P): P may be a plain sequence without `.shape`.
            raise ValueError("number of points = %d (< 3)" % len(P))
        lines = []
        for i, p in enumerate(P):
            q = P[(i + 1) % len(P)]
            lines.append(LineSegment.from_points(p, q))
        return cls(lines, *args, **kwargs)

    @property
    def points(self):
        """Return the (n, 2) array of edge start points (the vertices)."""
        # List comprehension (not bare `map`) so this works on Python 3.
        return np.asarray([l.points[0] for l in self.lines])

    @points.setter
    def points(self, P):
        if len(self.lines) != len(P):
            raise ValueError("len(P) != %d" % len(self.lines))
        for i, p in enumerate(P):
            q = P[(i + 1) % len(P)]
            self.lines[i].x0 = p
            self.lines[i].m = q - p

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__,
                           repr(self.lines))

    # dt
    def dt(self, integral_domain, outside_left=None):
        """Distance transform of the polygon over `integral_domain`.

        Returns (I, D): per-pixel index of the nearest edge and the
        (optionally signed) distance to it.
        """
        D = np.empty(tuple(integral_domain)[::-1] + (len(self.lines),),
                     dtype=np.float64)
        for i, l in enumerate(self.lines):
            D[..., i] = l.dt(integral_domain, outside_left)

        shape = D.shape
        D = D.reshape(-1, shape[2])
        if outside_left is not None:
            # A pixel is "outside" if any edge reports a positive distance.
            s = np.any(D > 0.0, axis=1).astype(np.float64)
            s *= 2.0
            s -= 1.0
            np.absolute(D, D)
        I = np.argmin(D, axis=1)
        D = D[np.arange(D.shape[0]), I]
        if outside_left is not None:
            D *= s

        return I.reshape(shape[:2]), D.reshape(shape[:2])

    def interior_angles(self):
        """Return the angle at each vertex, in radians, in [0, 2*pi)."""
        # List comprehension (not bare `map`) so numpy sees a sequence on
        # Python 3 as well.
        thetas = [np.arctan2(l.m[1], l.m[0]) for l in self.lines]
        thetas = np.asarray(thetas) % (2.0 * np.pi)

        n = thetas.shape[0]
        interior_angles = np.empty(n, dtype=np.float64)
        for i, t0 in enumerate(thetas):
            t1 = thetas[(i + 1) % n]
            interior_angles[i] = (np.pi + t0 - t1) % (2.0 * np.pi)
        return interior_angles
# sigmoid
def sigmoid(t, k=-1.0):
    """Element-wise logistic function `1 / (1 + exp(k * t))`."""
    values = np.require(np.atleast_1d(t), dtype=np.float64)
    return 1.0 / (np.exp(k * values) + 1.0)
# sigmoid_dt
def sigmoid_dt(t, k=-1.0, ktlim=1e2):
    """Derivative of `sigmoid` w.r.t. `t`, with `k*t` clipped to +/-ktlim
    to avoid overflow in exp."""
    # TODO Better handling of limits than `ktlim` ...
    values = np.require(np.atleast_1d(t), dtype=np.float64)
    kt = np.clip(k * values, -ktlim, ktlim)
    ekt = np.exp(kt)
    return (-k * ekt) / (1.0 + ekt)**2
# inverse_sigmoid
def inverse_sigmoid(y, k=-1.0, eps=1e-9):
    """Inverse of `sigmoid`; `y` is first clipped into [eps, 1 - eps]."""
    if k == 0.0:
        raise ValueError('k == 0.0')
    # Work on a float copy so the caller's array is never clipped in place.
    y = np.clip(np.atleast_1d(y).astype(np.float64), eps, 1.0 - eps)
    return np.log((1 - y) / y) / k
# Shard
class Shard(object):
    """A soft (sigmoid-weighted) polygon response over an integer domain."""

    def __init__(self, X, k):
        """X: (n, 2) polygon vertices; k: sigmoid sharpness parameter."""
        self._X = np.atleast_2d(X)
        self._k = k
        self._poly = Polygon.from_points(self._X)

        # determine `outside_left`: compare the summed vertex angles with
        # the values expected for the two traversal orientations.
        interior_angles = self._poly.interior_angles()
        sum_interior = np.sum(interior_angles)
        n = self._X.shape[0]
        expected_interior = (n - 2) * np.pi
        expected_exterior = 2.0 * np.pi * n - expected_interior
        self._outside_left = (np.abs(sum_interior - expected_interior) >=
                              np.abs(sum_interior - expected_exterior))

    def __call__(self, integral_domain, return_dX=False, epsilon=1e-6):
        """Evaluate the shard over `integral_domain`.

        With return_dX, also return forward-difference derivatives of the
        response w.r.t. the flattened vertex coordinates.
        """
        I, D = self._poly.dt(integral_domain, self._outside_left)
        H = sigmoid(D, self._k)
        if not return_dX:
            return H

        _x = self._X.ravel()
        n = _x.shape[0]
        dX = np.empty((n,) + H.shape, dtype=np.float64)
        # `range` (not the Python 2-only `xrange`) keeps this Python 3 safe.
        for i in range(n):
            x = _x.copy()
            x[i] += epsilon
            self._poly.points = x.reshape(self._X.shape)
            dX[i] = self(integral_domain)
        # Restore the unperturbed geometry: without this the loop leaves the
        # last coordinate displaced by epsilon for subsequent calls.
        self._poly.points = self._X
        dX -= H
        dX /= epsilon
        return H, dX
# main_test_LineSegment
def main_test_LineSegment():
    """Visual check: project two query points onto a line segment.

    NOTE: relies on `plt` and `Plotter`, which are only defined when this
    module is run as a script (see the __main__ block).
    """
    x0 = np.r_[0.0, 0.0]
    m = np.r_[2.0, 1.0]
    m *= (4.0 / norm(m))
    line = LineSegment(m, x0)

    Q = np.array([[2.0, 2.0],
                  [4.0, 3.0]], dtype=np.float64)
    u = line.closest_preimage(Q)

    f, ax = plt.subplots()
    ax.set_aspect('equal')
    plot = Plotter(delta=0.05)
    t = np.linspace(0.0, 1.0, 20, endpoint=True)
    plot(ax, line(t), 'ro-')
    # Draw each query point joined to its closest point on the segment.
    for i, ui in enumerate(u):
        plot(ax, np.r_['0,2', Q[i], line(ui)], 'bo-')
    ax.set_xlim(plot.xlim)
    ax.set_ylim(plot.ylim)
    plt.show()
# main_test_linedt
def main_test_linedt():
    """Visual check: signed distance transform of a single segment.

    NOTE: relies on `plt`, defined only when this module runs as a script.
    """
    x0 = np.r_[25.0, 25.0]
    m = np.r_[2.0, 1.0]
    m *= (10.0 / norm(m))
    line = LineSegment(m, x0)
    D = line.dt((50, 100), outside_left=True)
    f, ax = plt.subplots()
    ax.imshow(D)
    plt.show()
# main_test_Polygon
def main_test_Polygon():
    """Visual check: polygon distance transforms for two triangles.

    NOTE: relies on `plt`, defined only when this module runs as a script.
    """
    P1 = np.array([[ 10.,  10.],
                   [ 135.,  60.],
                   [ 60.,  10.]])
    P2 = P1.copy()
    P2[1] += (10.0, 40.0)
    domain = (160, 100)
    poly = Polygon.from_points(P1)
    for P in (P1, P2):
        # print() function: the Python 2 print statement is a SyntaxError
        # on Python 3 and would prevent the whole module from importing.
        print('is closed?', poly.is_closed(1e-4))
        poly.points = P
        I, D = poly.dt(domain, outside_left=True)
        x, y = np.transpose(np.r_['0,2', P, P[0]])
        # Flip y so the overlay matches image coordinates (row 0 at top).
        y = D.shape[0] - y
        for M in (D, sigmoid(D, k=1.0), I):
            f, ax = plt.subplots()
            im = ax.imshow(M, cmap='gray')
            ax.set_xlim(-0.5, M.shape[1] - 0.5)
            ax.set_ylim(M.shape[0] - 0.5, -0.5)
            ax.plot(x, y, 'ro-')
            f.colorbar(im)
    plt.show()
# main_test_Shard
def main_test_Shard():
    """Visual check: shard response and its finite-difference gradients.

    NOTE: relies on `plt` and `cm`, defined only when run as a script.
    """
    P = np.array([[ 10.,  10.],
                  [ 135.,  60.],
                  [ 60.,  10.]])
    k = 0.6
    shard = Shard(P, k)
    H, dX = shard((150, 100), return_dX=True, epsilon=1e-6)

    # colour `dX` so that all images are on the same scale
    min_, max_ = np.amin(dX), np.amax(dX)
    scaled_dX = (dX - min_) * (255. / (max_ - min_))
    I = np.around(scaled_dX).astype(np.int32)
    cmap = cm.gray(np.linspace(0., 1., 256, endpoint=True))
    coloured_dX = cmap[I]

    assert dX.shape[0] % 2 == 0
    # Floor division: on Python 3 `/` yields a float, which breaks both the
    # subplot count and the axes indexing below.
    f, axs = plt.subplots(2, dX.shape[0] // 2)
    for i, D in enumerate(dX):
        ax = axs[i % 2, i // 2]
        ax.set_title('x[%d] : (%.5g, %.5g)' % (i, np.amin(D), np.amax(D)))
        ax.imshow(coloured_dX[i])
    plt.show()
if __name__ == '__main__':
    # Plotting dependencies are only needed for the visual self-tests.
    import matplotlib.pyplot as plt
    from matplotlib import cm

    # Plotter
    class Plotter(object):
        """Helper that plots point arrays and tracks their overall extent
        so axis limits (with a fractional margin `delta`) can be set later."""

        def __init__(self, delta=0.0):
            self._delta = delta
            # Start with an inverted extent so the first update always wins.
            i = np.finfo(np.float64)
            self._xmin = i.max
            self._xmax = i.min
            self._ymin = i.max
            self._ymax = i.min

        def __call__(self, ax, P, *args, **kwargs):
            # Plot the (n, 2) array `P` on `ax` and grow the tracked extent.
            P = np.atleast_2d(P)
            x, y = np.transpose(P)
            ax.plot(x, y, *args, **kwargs)
            self._xmin = min(self._xmin, np.amin(x))
            self._xmax = max(self._xmax, np.amax(x))
            self._ymin = min(self._ymin, np.amin(y))
            self._ymax = max(self._ymax, np.amax(y))

        def _lim(self, min_, max_):
            # Pad the interval by `delta` times its width on each side.
            d = self._delta * (max_ - min_)
            return (min_ - d, max_ + d)

        @property
        def xlim(self):
            return self._lim(self._xmin, self._xmax)

        @property
        def ylim(self):
            return self._lim(self._ymin, self._ymax)

    main_test_LineSegment()
    main_test_linedt()
    main_test_Polygon()
    main_test_Shard()
| |
## Title: Server_Project_With_GUI.py
## Abstract:
## Author(s): Greg Greenleaf (pacohojaverde@gmail.com)
## Alex Hauser (ahauser@csumb.edu)
## Zac Leids (zleids@csumb.edu)
## Date: September 24, 2014
import socket, sys, pygame, time, os, random
if (sys.version_info < (3,0)):
import thread
else: import _thread as thread
pygame.init()
pygame.display.set_caption('Game')

size = width, height = 640, 480
speed = [2, 2]

# Basic colour palette (RGB).
BLUE = ( 0, 0, 255)
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)

# NOTE(review): mixer.pre_init() normally must be called BEFORE pygame.init();
# pygame.init() already ran above, so confirm these audio settings actually
# take effect.
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.init()
screen = pygame.display.set_mode(size)
basicfont = pygame.font.SysFont(None, 48)

# Load and scale the sprite artwork to 25x25 tiles.
playerImage = pygame.image.load(os.path.join("Player.png"))
playerImage.convert()
playerImage = pygame.transform.scale(playerImage, (25,25))
c = playerImage.get_at((1,1))

Ground1Image = pygame.image.load(os.path.join("Ground1.png"))
Ground1Image.convert()
Ground1Image = pygame.transform.scale(Ground1Image, (25,25))

Enemy1Image = pygame.image.load(os.path.join("Enemy1.png"))
Enemy1Image.convert()
Enemy1Image = pygame.transform.scale(Enemy1Image, (25,25))

# Previous player position, used to undo moves on collision.
lastX = 0
lastY = 0

# Background surface; also doubles as a paint canvas (see drawPixel/drawTrail).
bg = pygame.image.load(os.path.join("background0.png"))
bg.convert()
bg = pygame.transform.scale(bg, (640,480))

adjX = 0
adjY = 0

# Sprite groups: enemies, terrain tiles, and player(s).
sprites = pygame.sprite.Group()
terrain = pygame.sprite.Group()
players = pygame.sprite.Group()

# Background music starts playing immediately.
bgMusic = pygame.mixer.Sound(os.path.join('music.wav'))
bgMusic.play()
class Player(pygame.sprite.Sprite):
    """The player-controlled sprite, rendered from a pre-loaded surface."""

    def __init__(self, surf):
        super().__init__()
        self.image = surf
        self.rect = self.image.get_rect()
class Terrain(pygame.sprite.Sprite):
    """A static terrain tile drawn with the shared ground image.

    The constructor arguments are accepted for call-site compatibility but
    are not used.
    """

    def __init__(self, color, width, height):
        super().__init__()
        self.image = Ground1Image
        self.rect = self.image.get_rect()
class Block(pygame.sprite.Sprite):
    """Enemy sprite that jitters randomly and chases the global player."""

    # NOTE(review): these class attributes are shadowed by same-named locals
    # in Update() and are never actually read or written.
    myLastX = 0
    myLastY = 0

    def __init__(self, color, width, height):
        # color/width/height are accepted but unused; the shared enemy
        # image determines the sprite's appearance and size.
        super().__init__()
        self.image = Enemy1Image
        #self.image.fill(color)
        self.rect = self.image.get_rect()

    def Update(self):
        """Move one step: random jitter plus a 70% chance to step toward
        the player; undo the move if it collides with anything."""
        myLastX = self.rect.x
        myLastY = self.rect.y
        # random.randrange(-1, 1) yields -1 or 0 (upper bound exclusive).
        self.rect.x += random.randrange(-1, 1)
        self.rect.y += random.randrange(-1, 1)
        if random.randrange(0, 10) > 3:
            # Step one pixel toward the player's position on each axis.
            if self.rect.x < player.rect.x:
                self.rect.x += 1
            else:
                self.rect.x -= 1
            if self.rect.y < player.rect.y:
                self.rect.y += 1
            else:
                self.rect.y -= 1
        # Revert the move on collision with the player or terrain.
        player_hit_list = pygame.sprite.spritecollide(self, players, False)
        if (len(player_hit_list) > 0):
            self.rect.x = myLastX
            self.rect.y = myLastY
        terrain_hit_list = pygame.sprite.spritecollide(self, terrain, False)
        if (len(terrain_hit_list) > 0):
            self.rect.x = myLastX
            self.rect.y = myLastY
def drawPoint(x, y):
    """Draw a single blue pixel on the screen at (x, y)."""
    pygame.draw.line(screen, BLUE, (x, y), (x, y+1), 1)
def drawPixel(xy,c):
    """Set one pixel of the background surface at `xy` to colour `c`."""
    bg.set_at(xy, c)
def drawTrail():
    """Stamp a small red mark into the background at the player's position.

    NOTE(review): (x+1, y) is written twice and the pattern is asymmetric
    ((x, y-1)/(x+1, y-1) never set) - possibly a typo; confirm the intended
    mark shape.
    """
    bg.set_at((player.rect.x-1,player.rect.y),RED)
    bg.set_at((player.rect.x+1,player.rect.y),RED)
    bg.set_at((player.rect.x,player.rect.y),RED)
    bg.set_at((player.rect.x-1,player.rect.y-1),RED)
    bg.set_at((player.rect.x+1,player.rect.y),RED)
    bg.set_at((player.rect.x,player.rect.y+1),RED)
def Update():
    """Advance every enemy sprite, then undo the player's last move if it
    collided with terrain or an enemy (using the global lastX/lastY)."""
    global sprites
    for sprite in sprites:
        sprite.Update()
    terrain_hit_list = pygame.sprite.spritecollide(player, terrain, False)
    for t in terrain_hit_list:
        player.rect.x = lastX
        player.rect.y = lastY
    enemy_hit_list = pygame.sprite.spritecollide(player, sprites, False)
    if (len(enemy_hit_list) > 0):
        player.rect.x = lastX
        player.rect.y = lastY
def Draw():
    """Redraw the frame: background, terrain, enemies, player, then flip."""
    global sprites
    #screen.fill(BLACK)
    screen.blit(bg, (0, 0))
    # NOTE(review): these two text surfaces are rendered but never blitted
    # to the screen - dead code, or a missing screen.blit?
    text = basicfont.render("Hello World", True, (255, 0, 0), (255, 255, 255))
    textrect = text.get_rect()
    textrect.centerx = screen.get_rect().centerx
    textrect.centery = screen.get_rect().centery - 50
    text2 = basicfont.render("Hello", True, (255, 0, 0), (255, 255, 255))
    textrect2 = text2.get_rect()
    textrect2.centerx = screen.get_rect().centerx
    textrect2.centery = screen.get_rect().centery + 25
    terrain.draw(screen)
    sprites.draw(screen)
    players.draw(screen)
    #screen.blit(player, (screen.get_rect().centerx - 50 + (adjX * 5), screen.get_rect().centery - 50 + (adjY * 5)))
    pygame.display.flip()
screenChanged = False;

# Create the enemy, the player, and one initial terrain tile.
block = Block(RED,200,150)
block.rect.x = 50
block.rect.y = 50

player = Player(playerImage)
player.rect.x = width/2
player.rect.y = height/2
lastX = player.rect.x
lastY = player.rect.y
players.add(player)

t = Terrain(BLUE,200,150)
t.rect.x = width/2 + 100
t.rect.y = height/2 + 100
terrain.add(t)

sprites.add(block)

clock = pygame.time.Clock()
curColor = RED
colState = 0

# Main loop: handle input, advance sprites, redraw. No frame-rate cap is
# applied (clock.tick is never called).
while 1:
    # Remember the player's position so collisions can undo the move.
    lastY = player.rect.y
    lastX = player.rect.x
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.display.quit()
            sys.exit()
        if event.type == pygame.MOUSEBUTTONUP:
            # Cycle the paint colour BLACK -> BLUE -> RED on each click.
            curColor = BLACK
            if colState == 0:
                curColor = BLACK
            elif colState == 1:
                curColor = BLUE
            elif colState == 2:
                curColor = RED
                colState = -1
            colState+=1
            # Drop a terrain tile at the mouse position and paint a pixel.
            t = Terrain(BLUE,200,150)
            x, y = pygame.mouse.get_pos()
            t.rect.x = x
            t.rect.y = y
            terrain.add(t)
            # NOTE(review): saves the background to disk on EVERY click -
            # confirm this file write is intentional.
            pygame.image.save(bg,'temp.jpeg')
            drawPixel(pygame.mouse.get_pos(),curColor)
        if event.type == pygame.KEYDOWN:
            # WASD moves the player 15px per key press, leaving a trail.
            if event.key == pygame.K_w:
                player.rect.y-=15
                drawTrail()
            if event.key == pygame.K_s:
                player.rect.y+=15
                drawTrail()
            if event.key == pygame.K_a:
                player.rect.x-=15
                drawTrail()
            if event.key == pygame.K_d:
                player.rect.x+=15
                drawTrail()
    Update()
    Draw()
| |
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
"""
Support for building mpi4py with distutils/setuptools.
"""
# -----------------------------------------------------------------------------
import sys
import os
import shlex
import shutil
import platform
from distutils import log
from distutils import sysconfig
from distutils.util import convert_path
# Fix missing variables PyPy's distutils.sysconfig
# Fix missing variables PyPy's distutils.sysconfig
if hasattr(sys, 'pypy_version_info'):
    config_vars = sysconfig.get_config_vars()
    # PyPy may omit the install prefixes; fall back to the interpreter's.
    for name in ('prefix', 'exec_prefix'):
        if name not in config_vars:
            config_vars[name] = os.path.normpath(getattr(sys, name))
    # On macOS, allow symbols to stay unresolved until load time, as is
    # standard for C extension modules.
    if sys.platform == 'darwin' and 'LDSHARED' in config_vars:
        if '-undefined' not in config_vars['LDSHARED']:
            config_vars['LDSHARED'] += ' -undefined dynamic_lookup'
# Workaround distutils.cygwinccompiler.get_versions()
# failing when the compiler path contains spaces
from distutils import cygwinccompiler as cygcc
if hasattr(cygcc, 'get_versions'):
    cygcc_get_versions = cygcc.get_versions
    def get_versions():
        """Wrapper around cygwinccompiler.get_versions that quotes
        compiler paths containing spaces."""
        import distutils.spawn
        find_executable_orig = distutils.spawn.find_executable
        def find_executable(exe):
            # Quote the resolved path if it contains a space.
            exe = find_executable_orig(exe)
            if exe and ' ' in exe: exe = '"' + exe + '"'
            return exe
        # Temporarily monkey-patch find_executable for the duration of
        # the original get_versions() call, then restore it.
        distutils.spawn.find_executable = find_executable
        versions = cygcc_get_versions()
        distutils.spawn.find_executable = find_executable_orig
        return versions
    cygcc.get_versions = get_versions
# Normalize linker flags for runtime library dirs
from distutils.unixccompiler import UnixCCompiler
rpath_option_orig = UnixCCompiler.runtime_library_dir_option
def rpath_option(compiler, dir):
    """Return the rpath linker option, rewriting '-R' spellings to the
    '-Wl,-rpath,' form on Linux."""
    option = rpath_option_orig(compiler, dir)
    if sys.platform.startswith('linux'):
        for prefix in ('-Wl,-R,', '-R'):
            if option.startswith(prefix):
                option = option.replace(prefix, '-Wl,-rpath,', 1)
                break
    return option
UnixCCompiler.runtime_library_dir_option = rpath_option
def fix_compiler_cmd(cc, mpicc):
    """Replace the compiler executable in the command list `cc` (in place)
    with the words of the MPI wrapper command `mpicc`.

    Leading 'env' wrappers, VAR=VALUE assignments, and 'ccache' prefixes
    are preserved. No-op when `mpicc` is empty/None.
    """
    if not mpicc:
        return
    idx = 0
    # Skip over an 'env' wrapper and its variable assignments.
    while os.path.basename(cc[idx]) == 'env':
        idx += 1
    while '=' in cc[idx]:
        idx += 1
    # Keep any 'ccache' prefix in front of the real compiler.
    while os.path.basename(cc[idx]) == 'ccache':
        idx += 1
    cc[idx:idx + 1] = shlex.split(mpicc)
def fix_linker_cmd(ld, mpild):
    """Replace the linker executable in the command list `ld` (in place)
    with the words of the MPI wrapper command `mpild`.

    Unlike fix_compiler_cmd, any 'ccache' entries are removed rather than
    kept. No-op when `mpild` is empty/None.
    """
    if not mpild:
        return
    idx = 0
    # On AIX the command may start with the 'ld_so_aix' wrapper script.
    if (sys.platform.startswith('aix') and
            os.path.basename(ld[idx]) == 'ld_so_aix'):
        idx = 1
    # Skip over an 'env' wrapper and its variable assignments.
    while os.path.basename(ld[idx]) == 'env':
        idx += 1
    while '=' in ld[idx]:
        idx += 1
    # Drop 'ccache' entries; the MPI wrapper replaces them.
    while os.path.basename(ld[idx]) == 'ccache':
        del ld[idx]
    ld[idx:idx + 1] = shlex.split(mpild)
def customize_compiler(compiler, lang=None,
                       mpicc=None, mpicxx=None, mpild=None,
                       ):
    """Tweak a distutils *compiler* instance for building MPI code.

    Runs the stock distutils customization, propagates LDFLAGS/CFLAGS/
    CPPFLAGS from the environment into the link-executable command,
    strips flags known to break builds, and substitutes the MPI wrapper
    commands *mpicc*/*mpicxx*/*mpild* when given.  *lang* ('c', 'c++',
    or None) selects which wrapper drives the 'compiler_so' command.
    """
    sysconfig.customize_compiler(compiler)
    if compiler.compiler_type == 'unix':
        # distutils does not honor these environment variables for the
        # link-executable command; add them by hand
        ld = compiler.linker_exe
        for envvar in ('LDFLAGS', 'CFLAGS', 'CPPFLAGS'):
            if envvar in os.environ:
                ld += shlex.split(os.environ[envvar])
    if sys.platform == 'darwin':
        # remove flags rejected by modern Apple compilers
        badcflags = ['-mno-fused-madd']
        for attr in ('preprocessor',
                     'compiler', 'compiler_cxx', 'compiler_so',
                     'linker_so', 'linker_exe'):
            compiler_cmd = getattr(compiler, attr, None)
            if compiler_cmd is None: continue
            for flag in badcflags:
                while flag in compiler_cmd:
                    compiler_cmd.remove(flag)
    if compiler.compiler_type == 'unix':
        # Compiler command overriding
        if mpicc:
            fix_compiler_cmd(compiler.compiler, mpicc)
            if lang in ('c', None):
                fix_compiler_cmd(compiler.compiler_so, mpicc)
        if mpicxx:
            fix_compiler_cmd(compiler.compiler_cxx, mpicxx)
            if lang == 'c++':
                fix_compiler_cmd(compiler.compiler_so, mpicxx)
        if mpild:
            for ld in [compiler.linker_so, compiler.linker_exe]:
                fix_linker_cmd(ld, mpild)
    if compiler.compiler_type == 'cygwin':
        compiler.set_executables(
            preprocessor = 'gcc -mcygwin -E',
            )
    if compiler.compiler_type == 'mingw32':
        compiler.set_executables(
            preprocessor = 'gcc -mno-cygwin -E',
            )
    if compiler.compiler_type in ('unix', 'cygwin', 'mingw32'):
        # these flags are not valid for C++ compilations
        badcxxflags = [ '-Wimplicit', '-Wstrict-prototypes']
        for flag in badcxxflags:
            while flag in compiler.compiler_cxx:
                compiler.compiler_cxx.remove(flag)
            if lang == 'c++':
                while flag in compiler.compiler_so:
                    compiler.compiler_so.remove(flag)
    if compiler.compiler_type == 'mingw32':
        # Remove msvcrXX.dll
        del compiler.dll_libraries[:]
        # https://bugs.python.org/issue12641
        if compiler.gcc_version >= '4.4':
            for attr in (
                'preprocessor',
                'compiler', 'compiler_cxx', 'compiler_so',
                'linker_so', 'linker_exe'):
                # narrowed from a bare 'except:'; only a missing
                # attribute or an absent flag are expected here
                try:
                    getattr(compiler, attr).remove('-mno-cygwin')
                except (AttributeError, ValueError):
                    pass
        # Add required define and compiler flags for AMD64
        if platform.architecture()[0] == '64bit':
            for attr in (
                'preprocessor',
                'compiler', 'compiler_cxx', 'compiler_so',
                'linker_so', 'linker_exe'):
                getattr(compiler, attr).insert(1, '-DMS_WIN64')
                getattr(compiler, attr).insert(1, '-m64')
    if compiler.compiler_type == 'msvc':
        # embed manifests in the produced binaries
        if not compiler.initialized: compiler.initialize()
        compiler.ldflags_shared.append('/MANIFEST')
        compiler.ldflags_shared_debug.append('/MANIFEST')
# -----------------------------------------------------------------------------
from mpiconfig import Config
def configuration(command_obj, verbose=True):
    """Load and return the MPI build configuration for *command_obj*."""
    cfg = Config(log)
    cfg.setup(command_obj)
    if verbose:
        # report which section/file(s) the configuration came from
        if cfg.section and cfg.filename:
            cfg.log.info("MPI configuration: [%s] from '%s'",
                         cfg.section, ','.join(cfg.filename))
        cfg.info()
    return cfg
def configure_compiler(compiler, config, lang=None):
    """Apply the MPI *config* to a distutils *compiler* and return it."""
    # Resolve the MPI wrapper commands from the configuration.
    mpicc = config.get('mpicc')
    mpicxx = config.get('mpicxx')
    mpild = config.get('mpild')
    # No explicit linker configured: fall back to the compiler wrapper
    # matching *lang*, then to whichever wrapper is available.
    if not mpild and (mpicc or mpicxx):
        if lang == 'c': mpild = mpicc
        if lang == 'c++': mpild = mpicxx
        if not mpild: mpild = mpicc or mpicxx
    # Install the MPI wrappers into the compiler commands.
    customize_compiler(compiler, lang,
                       mpicc=mpicc, mpicxx=mpicxx, mpild=mpild)
    # Apply the remaining configuration entries.
    for k, v in config.get('define_macros', []):
        compiler.define_macro(k, v)
    for v in config.get('undef_macros', []):
        compiler.undefine_macro(v)
    for v in config.get('include_dirs', []):
        compiler.add_include_dir(v)
    for v in config.get('libraries', []):
        compiler.add_library(v)
    for v in config.get('library_dirs', []):
        compiler.add_library_dir(v)
    for v in config.get('runtime_library_dirs', []):
        compiler.add_runtime_library_dir(v)
    for v in config.get('extra_objects', []):
        compiler.add_link_object(v)
    # Extra compile/link flags only apply to these compiler types.
    if compiler.compiler_type in \
        ('unix', 'intel', 'cygwin', 'mingw32'):
        cc_args = config.get('extra_compile_args', [])
        ld_args = config.get('extra_link_args', [])
        compiler.compiler += cc_args
        compiler.compiler_so += cc_args
        compiler.compiler_cxx += cc_args
        compiler.linker_so += ld_args
        compiler.linker_exe += ld_args
    return compiler
# -----------------------------------------------------------------------------
# The MPI declaration scanner lives in 'mpiscanner.py'; provide a stub
# that fails loudly when that module is missing from the source tree.
try:
    from mpiscanner import Scanner
except ImportError:
    class Scanner(object):
        def parse_file(self, *args):
            raise NotImplementedError(
                "You forgot to grab 'mpiscanner.py'")
class ConfigureMPI(object):
    """Exhaustively test the MPI installation for missing symbols.

    Parses the declarations in SOURCES with the Scanner, try-links one
    tiny test program per declaration, and dumps the results into
    'config.h'/'missing.h' under DESTDIR.
    """
    SRCDIR = 'src'
    SOURCES = [os.path.join('mpi4py', 'libmpi.pxd')]
    DESTDIR = os.path.join('src', 'lib-mpi')
    CONFIG_H = os.path.join('config', 'config.h')
    MISSING_H = 'missing.h'
    # Header included by every generated test program; accumulates the
    # fallback definitions for symbols found missing along the way.
    CONFIGTEST_H = """\
/* _configtest.h */
#if !defined(MPIAPI)
#  define MPIAPI
#endif
"""
    def __init__(self, config_cmd):
        self.scanner = Scanner()
        for filename in self.SOURCES:
            fullname = os.path.join(self.SRCDIR, filename)
            self.scanner.parse_file(fullname)
        self.config_cmd = config_cmd
    def run(self):
        """Check every scanned declaration; return [(name, ok), ...]."""
        results = []
        with open('_configtest.h', 'w') as f:
            f.write(self.CONFIGTEST_H)
        for node in self.scanner:
            name = node.name
            testcode = node.config()
            confcode = node.missing(guard=False)
            log.info("checking for '%s' ..." % name)
            ok = self.run_test(testcode)
            if not ok:
                # append the fallback definition so later tests can
                # rely on it
                log.info("**** failed check for '%s'" % name)
                with open('_configtest.h', 'a') as f:
                    f.write(confcode)
            results.append((name, ok))
        try: os.remove('_configtest.h')
        except OSError: pass
        return results
    def gen_test(self, code):
        """Wrap *code* in a main() that includes '_configtest.h'."""
        body = ['#include "_configtest.h"',
                'int main(int argc, char **argv) {',
                '\n'.join([' ' + line for line in code.split('\n')]),
                ' (void)argc; (void)argv;',
                ' return 0;',
                '}']
        body = '\n'.join(body) + '\n'
        return body
    def run_test(self, code, lang='c'):
        """Try to compile and link one test program; return success."""
        # set_threshold() returns the previous level, so this pair of
        # calls reads the current threshold without changing it ...
        level = log.set_threshold(log.WARN)
        log.set_threshold(level)
        # ... and quiet mode then actually lowers it to WARN
        if not self.config_cmd.noisy:
            level = log.set_threshold(log.WARN)
        try:
            body = self.gen_test(code)
            headers = ['stdlib.h', 'mpi.h']
            ok = self.config_cmd.try_link(body, headers=headers, lang=lang)
            return ok
        finally:
            log.set_threshold(level)
    def dump(self, results):
        """Write the config/missing headers for the given *results*."""
        destdir = self.DESTDIR
        config_h = os.path.join(destdir, self.CONFIG_H)
        missing_h = os.path.join(destdir, self.MISSING_H)
        log.info("writing '%s'", config_h)
        self.scanner.dump_config_h(config_h, results)
        log.info("writing '%s'", missing_h)
        self.scanner.dump_missing_h(missing_h, None)
# -----------------------------------------------------------------------------
# Option table shared by all the MPI-aware distutils commands below.
# Each entry is (long-option, short-option, help-text).
cmd_mpi_opts = [
    ('mpild=', None,
     "MPI linker command, "
     "overridden by environment variable 'MPILD' "
     "(defaults to 'mpicc' or 'mpicxx' if any is available)"),
    ('mpif77=', None,
     "MPI F77 compiler command, "
     "overridden by environment variable 'MPIF77' "
     "(defaults to 'mpif77' if available)"),
    ('mpif90=', None,
     "MPI F90 compiler command, "
     "overridden by environment variable 'MPIF90' "
     "(defaults to 'mpif90' if available)"),
    ('mpifort=', None,
     "MPI Fortran compiler command, "
     "overridden by environment variable 'MPIFORT' "
     "(defaults to 'mpifort' if available)"),
    ('mpicxx=', None,
     "MPI C++ compiler command, "
     "overridden by environment variable 'MPICXX' "
     "(defaults to 'mpicxx', 'mpiCC', or 'mpic++' if any is available)"),
    ('mpicc=', None,
     "MPI C compiler command, "
     "overridden by environment variable 'MPICC' "
     "(defaults to 'mpicc' if available)"),
    ('mpi=', None,
     "specify a configuration section, "
     "and an optional list of configuration files "
     + "(e.g. --mpi=section,file1" + os.path.pathsep + "file2), " +
     "to look for MPI includes/libraries, "
     "overridden by environment variable 'MPICFG' "
     "(defaults to section 'mpi' in configuration file 'mpi.cfg')"),
    ('configure', None,
     "exhaustive test for checking missing MPI constants/types/functions"),
    ]
def cmd_get_mpi_options(cmd_opts):
    """Return the attribute names for a distutils option table.

    A trailing '=' (takes-a-value marker) is stripped and dashes are
    mapped to underscores, mirroring how distutils names attributes.
    """
    names = []
    for entry in cmd_opts:
        name = entry[0]
        if name.endswith('='):
            name = name[:-1]
        names.append(name.replace('-', '_'))
    return names
def cmd_initialize_mpi_options(cmd):
    """Initialize every MPI-related option on *cmd* to None."""
    for name in cmd_get_mpi_options(cmd_mpi_opts):
        setattr(cmd, name, None)
def cmd_set_undefined_mpi_options(cmd, basecmd):
    """Inherit any unset MPI options on *cmd* from command *basecmd*."""
    pairs = [(name, name) for name in cmd_get_mpi_options(cmd_mpi_opts)]
    cmd.set_undefined_options(basecmd, *pairs)
# -----------------------------------------------------------------------------
try:
import setuptools
except ImportError:
setuptools = None
def import_command(cmd):
    """Import the named command module, preferring setuptools.

    Falls back to the distutils implementation when setuptools is not
    available or does not provide the command.
    """
    try:
        from importlib import import_module
    except ImportError:
        import_module = lambda n: __import__(n, fromlist=[None])
    if setuptools:
        try:
            return import_module('setuptools.command.' + cmd)
        except ImportError:
            pass
    return import_module('distutils.command.' + cmd)
if setuptools:
from setuptools import Distribution as cls_Distribution
from setuptools import Extension as cls_Extension
from setuptools import Command
else:
from distutils.core import Distribution as cls_Distribution
from distutils.core import Extension as cls_Extension
from distutils.core import Command
cmd_config = import_command('config')
cmd_build = import_command('build')
cmd_install = import_command('install')
cmd_clean = import_command('clean')
cmd_build_clib = import_command('build_clib')
cmd_build_ext = import_command('build_ext')
cmd_install_lib = import_command('install_lib')
cmd_install_data = import_command('install_data')
from distutils.errors import DistutilsError
from distutils.errors import DistutilsSetupError
from distutils.errors import DistutilsPlatformError
from distutils.errors import DistutilsOptionError
from distutils.errors import CCompilerError
try:
from packaging.version import (
Version,
LegacyVersion,
)
except ImportError:
try:
from setuptools.extern.packaging.version import (
Version,
LegacyVersion,
)
except ImportError:
from distutils.version import (
StrictVersion as Version,
LooseVersion as LegacyVersion
)
try:
from setuptools import dep_util
except ImportError:
from distutils import dep_util
# -----------------------------------------------------------------------------
# Distribution class supporting a 'executables' keyword
class Distribution(cls_Distribution):
    """Distribution adding package data, PEP 314 metadata attributes,
    and support for an 'executables' setup keyword."""
    def __init__ (self, attrs=None):
        # package data support
        self.package_data = {}
        # PEP 314 metadata fields
        self.provides = None
        self.requires = None
        self.obsoletes = None
        # 'executables' keyword support
        self.executables = None
        cls_Distribution.__init__(self, attrs)
    def has_executables(self):
        # truthy when at least one executable was declared
        executables = self.executables
        return executables and len(executables) > 0
    def is_pure (self):
        # a distribution shipping executables is never pure
        pure = cls_Distribution.is_pure(self)
        return pure and not self.has_executables()
# Extension class
class Extension(cls_Extension):
    """Extension accepting extra 'optional' and 'configure' keywords."""
    def __init__ (self, **kw):
        opt = kw.pop('optional', None)
        cfg = kw.pop('configure', None)
        cls_Extension.__init__(self, **kw)
        self.optional = opt
        self.configure = cfg
# Library class
class Library(Extension):
    """Extension describing a C/C++ library; 'kind' defaults to static."""
    def __init__ (self, **kw):
        kind = kw.pop('kind', "static")
        pkg = kw.pop('package', None)
        dst = kw.pop('dest_dir', None)
        Extension.__init__(self, **kw)
        self.kind = kind
        self.package = pkg
        self.dest_dir = dst
# Executable class
class Executable(Extension):
    """Extension describing a binary executable component."""
    def __init__ (self, **kw):
        pkg = kw.pop('package', None)
        dst = kw.pop('dest_dir', None)
        Extension.__init__(self, **kw)
        self.package = pkg
        self.dest_dir = dst
# setup function
def setup(**attrs):
    """Drop-in setup() wrapper installing the customized distribution
    class and the MPI-aware command classes (unless overridden)."""
    if setuptools:
        from setuptools import setup as fcn_setup
    else:
        from distutils.core import setup as fcn_setup
    attrs.setdefault('distclass', Distribution)
    cmdclass = attrs.setdefault('cmdclass', {})
    for cmd in (config, build, install, clean,
                build_src, build_clib, build_ext, build_exe,
                install_lib, install_data, install_exe,
                ):
        cmdclass.setdefault(cmd.__name__, cmd)
    return fcn_setup(**attrs)
# -----------------------------------------------------------------------------
# A minimalistic MPI program :-)
ConfigTest = """\
int main(int argc, char **argv)
{
int ierr;
(void)argc; (void)argv;
ierr = MPI_Init(&argc, &argv);
if (ierr) return -1;
ierr = MPI_Finalize();
if (ierr) return -1;
return 0;
}
"""
class config(cmd_config.config):
    """'config' command probing the MPI C/C++ toolchain.

    Extends the stock command with the MPI options and with helpers to
    probe for headers, macros, libraries, functions and symbols by
    compiling/linking tiny generated test programs.
    """
    user_options = cmd_config.config.user_options + cmd_mpi_opts
    def initialize_options (self):
        cmd_config.config.initialize_options(self)
        cmd_initialize_mpi_options(self)
        self.noisy = 0
    def finalize_options (self):
        cmd_config.config.finalize_options(self)
        if not self.noisy:
            self.dump_source = 0
    def _clean(self, *a, **kw):
        # on Windows also schedule the linker-generated manifest file
        # for removal
        if sys.platform.startswith('win'):
            for fn in ('_configtest.exe.manifest', ):
                if os.path.exists(fn):
                    self.temp_files.append(fn)
        cmd_config.config._clean(self, *a, **kw)
    def check_header (self, header, headers=None, include_dirs=None):
        """Return True if *header* can be #included (compile-only)."""
        if headers is None: headers = []
        log.info("checking for header '%s' ..." % header)
        body = "int main(int n, char**v) { (void)n; (void)v; return 0; }"
        ok = self.try_compile(body, list(headers) + [header], include_dirs)
        log.info(ok and 'success!' or 'failure.')
        return ok
    def check_macro (self, macro, headers=None, include_dirs=None):
        """Return True if preprocessor *macro* is defined."""
        log.info("checking for macro '%s' ..." % macro)
        body = ("#ifndef %s\n"
                "#error macro '%s' not defined\n"
                "#endif\n") % (macro, macro)
        body += "int main(int n, char**v) { (void)n; (void)v; return 0; }"
        ok = self.try_compile(body, headers, include_dirs)
        return ok
    def check_library (self, library, library_dirs=None,
                       headers=None, include_dirs=None,
                       other_libraries=[], lang="c"):
        """Return True if *library* can be linked against."""
        # on macOS, relax undefined-symbol resolution for the test link
        if sys.platform == "darwin":
            self.compiler.linker_exe.append('-flat_namespace')
            self.compiler.linker_exe.append('-undefined')
            self.compiler.linker_exe.append('suppress')
        log.info("checking for library '%s' ..." % library)
        body = "int main(int n, char**v) { (void)n; (void)v; return 0; }"
        ok = self.try_link(body, headers, include_dirs,
                           [library]+other_libraries, library_dirs,
                           lang=lang)
        if sys.platform == "darwin":
            self.compiler.linker_exe.remove('-flat_namespace')
            self.compiler.linker_exe.remove('-undefined')
            self.compiler.linker_exe.remove('suppress')
        return ok
    def check_function (self, function,
                        headers=None, include_dirs=None,
                        libraries=None, library_dirs=None,
                        decl=0, call=0, lang="c"):
        """Return True if *function* links; optionally declare/call it."""
        log.info("checking for function '%s' ..." % function)
        body = []
        if decl:
            if call: proto = "int %s (void);"
            else: proto = "int %s;"
            if lang == "c":
                proto = "\n".join([
                    "#ifdef __cplusplus",
                    "extern \"C\"",
                    "#endif", proto])
            body.append(proto % function)
        body.append( "int main (int n, char**v) {")
        if call:
            body.append(" (void)%s();" % function)
        else:
            body.append(" %s;" % function)
        body.append( " (void)n; (void)v;")
        body.append( " return 0;")
        body.append( "}")
        body = "\n".join(body) + "\n"
        ok = self.try_link(body, headers, include_dirs,
                           libraries, library_dirs, lang=lang)
        return ok
    def check_symbol (self, symbol, type="int",
                      headers=None, include_dirs=None,
                      libraries=None, library_dirs=None,
                      decl=0, lang="c"):
        """Return True if *symbol* of the given C *type* links."""
        log.info("checking for symbol '%s' ..." % symbol)
        body = []
        if decl:
            body.append("%s %s;" % (type, symbol))
        body.append("int main (int n, char**v) {")
        body.append(" %s s; s = %s; (void)s;" % (type, symbol))
        body.append(" (void)n; (void)v;")
        body.append(" return 0;")
        body.append("}")
        body = "\n".join(body) + "\n"
        ok = self.try_link(body, headers, include_dirs,
                           libraries, library_dirs, lang=lang)
        return ok
    def check_function_call (self, function, args='',
                             headers=None, include_dirs=None,
                             libraries=None, library_dirs=None,
                             lang="c"):
        """Return True if calling *function*(*args*) links."""
        log.info("checking for function '%s' ..." % function)
        body = []
        body.append("int main (int n, char**v) {")
        body.append(" (void)%s(%s);" % (function, args))
        body.append(" (void)n; (void)v;")
        body.append(" return 0;")
        body.append("}")
        body = "\n".join(body) + "\n"
        ok = self.try_link(body, headers, include_dirs,
                           libraries, library_dirs, lang=lang)
        return ok
    # short aliases for the checkers above
    check_hdr = check_header
    check_lib = check_library
    check_func = check_function
    check_sym = check_symbol
    def run (self):
        """Configure the compiler and smoke-test the MPI C and C++
        toolchains by linking a minimal MPI program."""
        config = configuration(self, verbose=True)
        # test MPI C compiler
        self.compiler = getattr(
            self.compiler, 'compiler_type', self.compiler)
        self._check_compiler()
        configure_compiler(self.compiler, config, lang='c')
        self.try_link(ConfigTest, headers=['mpi.h'], lang='c')
        # test MPI C++ compiler
        self.compiler = getattr(
            self.compiler, 'compiler_type', self.compiler)
        self._check_compiler()
        configure_compiler(self.compiler, config, lang='c++')
        self.try_link(ConfigTest, headers=['mpi.h'], lang='c++')
class build(cmd_build.build):
    """'build' command aware of the MPI options.

    Options left unset are inherited from a finalized 'config' command,
    and a 'build_src' step is prepended to the standard sub-commands.
    """
    user_options = cmd_build.build.user_options + cmd_mpi_opts
    def initialize_options(self):
        cmd_build.build.initialize_options(self)
        cmd_initialize_mpi_options(self)
    def finalize_options(self):
        cmd_build.build.finalize_options(self)
        config_cmd = self.get_finalized_command('config')
        if isinstance(config_cmd, config):
            cmd_set_undefined_mpi_options(self, 'config')
    def has_executables (self):
        # predicate for the (currently disabled) build_exe sub-command
        return self.distribution.has_executables()
    sub_commands = \
        [('build_src', lambda *args: True)] + \
        cmd_build.build.sub_commands + \
        [('build_exe', has_executables)]
    # XXX disable build_exe subcommand !!!
    del sub_commands[-1]
class build_src(Command):
    """Placeholder step for generating C sources from Cython files.

    The run() method is a no-op here; the command only participates in
    the 'build' sub-command chain.
    """
    description = "build C sources from Cython files"
    user_options = [
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ]
    boolean_options = ['force']
    def initialize_options(self):
        self.force = False
    def finalize_options(self):
        self.set_undefined_options('build',
                                   ('force', 'force'),
                                   )
    def run(self):
        # no-op: sources are expected to be pre-generated
        pass
# Command class to build libraries
class build_clib(cmd_build_clib.build_clib):
    """'build_clib' command supporting two kinds of library entries:
    classic distutils (name, build_info) 2-tuples, built as before, and
    Library instances, built as static archives or shared objects
    depending on their 'kind' attribute."""
    user_options = [
        ('build-clib-a=', 's',
         "directory to build C/C++ static libraries to"),
        ('build-clib-so=', 's',
         "directory to build C/C++ shared libraries to"),
        ]
    user_options += cmd_build_clib.build_clib.user_options + cmd_mpi_opts
    def initialize_options (self):
        self.libraries = None
        self.libraries_a = []
        self.libraries_so = []
        self.library_dirs = None
        self.rpath = None
        self.link_objects = None
        self.build_lib = None
        self.build_clib_a = None
        self.build_clib_so = None
        cmd_build_clib.build_clib.initialize_options(self)
        cmd_initialize_mpi_options(self)
    def finalize_options (self):
        """Inherit unset options and partition the library list."""
        cmd_build_clib.build_clib.finalize_options(self)
        build_cmd = self.get_finalized_command('build')
        if isinstance(build_cmd, build):
            cmd_set_undefined_mpi_options(self, 'build')
        # default all output directories to build_lib
        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'),
                                   ('build_lib', 'build_clib_a'),
                                   ('build_lib', 'build_clib_so'))
        # split Library instances from old-style (name, build_info)
        # tuples; the latter remain in self.libraries
        if self.libraries:
            libraries = self.libraries[:]
            self.libraries = []
            self.check_library_list (libraries)
            for lib in libraries:
                if isinstance(lib, Library):
                    if lib.kind == "static":
                        self.libraries_a.append(lib)
                    else:
                        self.libraries_so.append(lib)
                else:
                    self.libraries.append(lib)
    def check_library_list (self, libraries):
        """Validate 'libraries': each entry must be a Library instance
        or a (name, build_info) 2-tuple; raise DistutilsSetupError
        otherwise."""
        ListType, TupleType = type([]), type(())
        if not isinstance(libraries, ListType):
            raise DistutilsSetupError(
                "'libraries' option must be a list of "
                "Library instances or 2-tuples")
        for lib in libraries:
            # normalize the two accepted entry shapes
            if isinstance(lib, Library):
                lib_name = lib.name
                build_info = lib.__dict__
            elif isinstance(lib, TupleType) and len(lib) == 2:
                lib_name, build_info = lib
            else:
                raise DistutilsSetupError(
                    "each element of 'libraries' option must be an "
                    "Library instance or 2-tuple")
            # validate the library name
            if not isinstance(lib_name, str):
                raise DistutilsSetupError(
                    "first element of each tuple in 'libraries' "
                    "must be a string (the library name)")
            if '/' in lib_name or (os.sep != '/' and os.sep in lib_name):
                # BUGFIX: use lib_name; 'lib[0]' raised TypeError when
                # lib is a Library instance
                raise DistutilsSetupError(
                    "bad library name '%s': "
                    "may not contain directory separators" % lib_name)
            if not isinstance(build_info, dict):
                raise DistutilsSetupError(
                    "second element of each tuple in 'libraries' "
                    "must be a dictionary (build info)")
            # validate the build info contents
            lib_type = build_info.get('kind', 'static')
            if lib_type not in ('static', 'shared', 'dylib'):
                raise DistutilsSetupError(
                    "in 'kind' option (library '%s'), "
                    "'kind' must be one of "
                    " \"static\", \"shared\", \"dylib\"" % lib_name)
            sources = build_info.get('sources')
            if (sources is None or
                type(sources) not in (ListType, TupleType)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'sources' must be present and must be "
                    "a list of source filenames" % lib_name)
            depends = build_info.get('depends')
            if (depends is not None and
                type(depends) not in (ListType, TupleType)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'depends' must be a list "
                    "of source filenames" % lib_name)
    def run (self):
        """Build old-style libraries, then the Library instances with
        an MPI-configured compiler."""
        cmd_build_clib.build_clib.run(self)
        if (not self.libraries_a and
            not self.libraries_so):
            return
        # fresh compiler instance for the new-style libraries
        from distutils.ccompiler import new_compiler
        self.compiler = new_compiler(compiler=self.compiler,
                                     dry_run=self.dry_run,
                                     force=self.force)
        # apply command-line preprocessor/search-path settings
        if self.define is not None:
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)
        # configure the compiler for MPI
        config = configuration(self, verbose=True)
        configure_compiler(self.compiler, config)
        if self.compiler.compiler_type == "unix":
            # drop any instance-level override so the platform default
            # shared-lib extension is used; the attribute may only
            # exist on the class, hence AttributeError (narrowed from a
            # bare 'except:')
            try:
                del self.compiler.shared_lib_extension
            except AttributeError:
                pass
        #
        self.build_libraries(self.libraries)
        self.build_libraries(self.libraries_a)
        self.build_libraries(self.libraries_so)
    def build_libraries (self, libraries):
        """Build each entry, tolerating failures of optional ones."""
        for lib in libraries:
            # old-style
            if not isinstance(lib, Library):
                cmd_build_clib.build_clib.build_libraries(self, [lib])
                continue
            # new-style
            try:
                self.build_library(lib)
            except (DistutilsError, CCompilerError):
                if not lib.optional: raise
                e = sys.exc_info()[1]
                self.warn('%s' % e)
                self.warn('building optional library "%s" failed' % lib.name)
    def config_library (self, lib):
        # run the library-specific configure hook, if any
        if lib.configure:
            config_cmd = self.get_finalized_command('config')
            config_cmd.compiler = self.compiler # fix compiler
            return lib.configure(lib, config_cmd)
    def build_library(self, lib):
        """Compile and link one Library instance (static or shared)."""
        sources = [convert_path(p) for p in lib.sources]
        depends = [convert_path(p) for p in lib.depends]
        depends = sources + depends
        if lib.kind == "static":
            build_dir = self.build_clib_a
        else:
            build_dir = self.build_clib_so
        lib_fullpath = self.get_lib_fullpath(lib, build_dir)
        if not (self.force or
                dep_util.newer_group(depends, lib_fullpath, 'newer')):
            log.debug("skipping '%s' %s library (up-to-date)",
                      lib.name, lib.kind)
            return
        self.config_library(lib)
        log.info("building '%s' %s library", lib.name, lib.kind)
        # First, compile the source code to object files in the library
        # directory. (This should probably change to putting object
        # files in a temporary build directory.)
        macros = lib.define_macros[:]
        for undef in lib.undef_macros:
            macros.append((undef,))
        objects = self.compiler.compile(
            sources,
            depends=lib.depends,
            output_dir=self.build_temp,
            macros=macros,
            include_dirs=lib.include_dirs,
            extra_preargs=None,
            extra_postargs=lib.extra_compile_args,
            debug=self.debug,
            )
        if lib.kind == "static":
            # Now "link" the object files together
            # into a static library.
            self.compiler.create_static_lib(
                objects,
                lib.name,
                output_dir=os.path.dirname(lib_fullpath),
                debug=self.debug,
                )
        else:
            extra_objects = lib.extra_objects[:]
            export_symbols = lib.export_symbols[:]
            extra_link_args = lib.extra_link_args[:]
            extra_preargs = None
            objects.extend(extra_objects)
            if (self.compiler.compiler_type == 'msvc' and
                export_symbols is not None):
                output_dir = os.path.dirname(lib_fullpath)
                implib_filename = self.compiler.library_filename(lib.name)
                # NOTE(review): joining output_dir with the full path
                # duplicates the directory; 'implib_filename' looks like
                # the intended second argument -- confirm before changing
                implib_file = os.path.join(output_dir, lib_fullpath)
                extra_link_args.append ('/IMPLIB:' + implib_file)
            # Detect target language, if not provided
            src_language = self.compiler.detect_language(sources)
            language = (lib.language or src_language)
            # Now "link" the object files together
            # into a shared library.
            if sys.platform == 'darwin':
                # build a '-shared' dylib instead of a '-bundle', and
                # set its install name
                linker_so = self.compiler.linker_so[:]
                while '-bundle' in self.compiler.linker_so:
                    pos = self.compiler.linker_so.index('-bundle')
                    self.compiler.linker_so[pos] = '-shared'
                install_name = os.path.basename(lib_fullpath)
                extra_preargs = ['-install_name', install_name]
            if sys.platform.startswith('linux'):
                extra_preargs = ['-Wl,--no-as-needed']
            self.compiler.link(
                self.compiler.SHARED_LIBRARY,
                objects, lib_fullpath,
                #
                libraries=lib.libraries,
                library_dirs=lib.library_dirs,
                runtime_library_dirs=lib.runtime_library_dirs,
                export_symbols=export_symbols,
                extra_preargs=extra_preargs,
                extra_postargs=extra_link_args,
                debug=self.debug,
                target_lang=language,
                )
            if sys.platform == 'darwin':
                # restore the original linker command
                self.compiler.linker_so = linker_so
        return
    def get_lib_fullpath (self, lib, build_dir):
        """Return the output filename of *lib* under *build_dir*."""
        package_dir = (lib.package or '').split('.')
        dest_dir = convert_path(lib.dest_dir or '')
        output_dir = os.path.join(build_dir, *package_dir+[dest_dir])
        lib_type = lib.kind
        if sys.platform != 'darwin':
            # 'dylib' only means something on macOS
            if lib_type == 'dylib':
                lib_type = 'shared'
        lib_fullpath = self.compiler.library_filename(
            lib.name, lib_type=lib_type, output_dir=output_dir)
        return lib_fullpath
    def get_source_files (self):
        filenames = cmd_build_clib.build_clib.get_source_files(self)
        self.check_library_list(self.libraries)
        self.check_library_list(self.libraries_a)
        self.check_library_list(self.libraries_so)
        for (lib_name, build_info) in self.libraries:
            # BUGFIX: the key must be the string 'sources'; the bare
            # name 'sources' raised NameError here
            filenames.extend(build_info.get('sources', []))
        for lib in self.libraries_so + self.libraries_a:
            filenames.extend(lib.sources)
        return filenames
    def get_outputs (self):
        outputs = []
        for lib in self.libraries_a:
            lib_fullpath = self.get_lib_fullpath(lib, self.build_clib_a)
            outputs.append(lib_fullpath)
        for lib in self.libraries_so:
            lib_fullpath = self.get_lib_fullpath(lib, self.build_clib_so)
            outputs.append(lib_fullpath)
        return outputs
# Command class to build extension modules
class build_ext(cmd_build_ext.build_ext):
    """'build_ext' command building extensions with the MPI toolchain.

    Keeps two compiler setups around: the stock one ('compiler_sys')
    used for the special 'mpi4py.dl' extension, and an MPI-configured
    one ('compiler_mpi') for everything else.
    """
    user_options = cmd_build_ext.build_ext.user_options + cmd_mpi_opts
    def initialize_options(self):
        cmd_build_ext.build_ext.initialize_options(self)
        cmd_initialize_mpi_options(self)
    def finalize_options(self):
        cmd_build_ext.build_ext.finalize_options(self)
        build_cmd = self.get_finalized_command('build')
        if isinstance(build_cmd, build):
            cmd_set_undefined_mpi_options(self, 'build')
        # fix up library search paths on ELF platforms when libpython
        # is shared
        if ((sys.platform.startswith('linux') or
             sys.platform.startswith('gnu') or
             sys.platform.startswith('sunos')) and
            sysconfig.get_config_var('Py_ENABLE_SHARED')):
            # Remove <prefix>/lib[64]/pythonX.Y/config
            libdir = os.path.dirname(sysconfig.get_makefile_filename())
            if libdir in self.library_dirs:
                # BUGFIX: was 'self.library_dirs.remove(bad_libdir)',
                # a NameError -- 'libdir' is the name computed above
                self.library_dirs.remove(libdir)
            # Add <prefix>/lib[64]
            libdir = sysconfig.get_config_var("LIBDIR")
            if libdir not in self.library_dirs:
                self.library_dirs.append(libdir)
            if libdir not in self.rpath:
                self.rpath.append(libdir)
            # Special-case
            if sys.exec_prefix == '/usr':
                self.library_dirs.remove(libdir)
                self.rpath.remove(libdir)
    def run (self):
        # make sure the helper C libraries are built first
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            if build_clib.libraries:
                build_clib.run()
        cmd_build_ext.build_ext.run(self)
    def build_extensions(self):
        """Configure compilers, optionally probe for missing MPI
        symbols, then build every extension."""
        from copy import deepcopy
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        # customize compiler
        self.compiler_sys = deepcopy(self.compiler)
        customize_compiler(self.compiler_sys)
        # parse configuration file and configure compiler
        self.compiler_mpi = self.compiler
        self.config = configuration(self, verbose=True)
        configure_compiler(self.compiler, self.config)
        # extra configuration, check for all MPI symbols
        if self.configure:
            log.info('testing for missing MPI symbols')
            config_cmd = self.get_finalized_command('config')
            config_cmd.compiler = self.compiler # fix compiler
            configure = ConfigureMPI(config_cmd)
            results = configure.run()
            configure.dump(results)
            #
            macro = 'HAVE_CONFIG_H'
            log.info("defining preprocessor macro '%s'" % macro)
            self.compiler.define_macro(macro, 1)
        # build extensions, tolerating failures of optional ones
        for ext in self.extensions:
            try:
                self.build_extension(ext)
            except (DistutilsError, CCompilerError):
                if not ext.optional: raise
                e = sys.exc_info()[1]
                self.warn('%s' % e)
                exe = isinstance(ext, Executable)
                knd = 'executable' if exe else 'extension'
                self.warn('building optional %s "%s" failed' % (knd, ext.name))
    def config_extension (self, ext):
        # run the extension-specific configure hook, if any
        configure = getattr(ext, 'configure', None)
        if configure:
            config_cmd = self.get_finalized_command('config')
            config_cmd.compiler = self.compiler # fix compiler
            configure(ext, config_cmd)
    def build_extension (self, ext):
        """Build one extension, selecting the proper compiler and
        dumping the MPI configuration next to 'mpi4py.MPI'."""
        fullname = self.get_ext_fullname(ext.name)
        filename = os.path.join(
            self.build_lib, self.get_ext_filename(fullname))
        depends = ext.sources + ext.depends
        if not (self.force or
                dep_util.newer_group(depends, filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        # select the right compiler for this extension
        # XXX -- this is a Vile HACK!
        self.compiler = self.compiler_mpi
        if ext.name == 'mpi4py.dl':
            self.compiler = self.compiler_sys
        #
        self.config_extension(ext)
        cmd_build_ext.build_ext.build_extension(self, ext)
        # drop a record of the MPI configuration next to the module
        # XXX -- this is a Vile HACK!
        if ext.name == 'mpi4py.MPI':
            dest_dir = os.path.dirname(filename)
            self.mkpath(dest_dir)
            mpi_cfg = os.path.join(dest_dir, 'mpi.cfg')
            log.info("writing %s" % mpi_cfg)
            if not self.dry_run:
                self.config.dump(filename=mpi_cfg)
    def get_outputs(self):
        outputs = cmd_build_ext.build_ext.get_outputs(self)
        for ext in self.extensions:
            # the generated 'mpi.cfg' is an output as well
            # XXX -- this is a Vile HACK!
            if ext.name == 'mpi4py.MPI':
                fullname = self.get_ext_fullname(ext.name)
                filename = os.path.join(
                    self.build_lib,
                    self.get_ext_filename(fullname))
                dest_dir = os.path.dirname(filename)
                mpi_cfg = os.path.join(dest_dir, 'mpi.cfg')
                outputs.append(mpi_cfg)
        return outputs
# Command class to build executables
class build_exe(build_ext):
    """Build standalone binary executables from the distribution's
    ``Executable`` items, reusing the ``build_ext`` machinery by aliasing
    the extension-building hooks onto executables (see finalize_options).
    """

    description = "build binary executable components"

    user_options = [
        ('build-exe=', None,
         "build directory for executable components"),
        ] + build_ext.user_options

    def initialize_options (self):
        """Reset options to their pre-finalization defaults."""
        build_ext.initialize_options(self)
        self.build_base = None
        self.build_exe = None

    def finalize_options (self):
        """Resolve options and alias build_ext hooks to executables."""
        build_ext.finalize_options(self)
        self.configure = None
        # Inherit build_base from 'build'; executables default to the
        # same directory used for built libraries.
        self.set_undefined_options('build',
                                   ('build_base','build_base'),
                                   ('build_lib', 'build_exe'))
        self.executables = self.distribution.executables
        # XXX This is a hack: pretend each Executable is an Extension so
        # that the inherited build_ext.run() drives our methods below.
        self.extensions = self.distribution.executables
        self.check_extensions_list = self.check_executables_list
        self.build_extension = self.build_executable
        self.get_ext_filename = self.get_exe_filename
        self.build_lib = self.build_exe

    def check_executables_list (self, executables):
        """Validate that 'executables' is a list of well-formed
        Executable instances; raise DistutilsSetupError otherwise."""
        ListType, TupleType = type([]), type(())
        if type(executables) is not ListType:
            raise DistutilsSetupError(
                "'executables' option must be a list of Executable instances")
        for exe in executables:
            if not isinstance(exe, Executable):
                raise DistutilsSetupError(
                    "'executables' items must be Executable instances")
            if (exe.sources is None or
                type(exe.sources) not in (ListType, TupleType)):
                raise DistutilsSetupError(
                    ("in 'executables' option (executable '%s'), " +
                     "'sources' must be present and must be " +
                     "a list of source filenames") % exe.name)

    def get_exe_filename(self, exe_name):
        """Return the platform filename for an executable (adds the
        'EXE' suffix from sysconfig, e.g. '.exe' on Windows)."""
        exe_ext = sysconfig.get_config_var('EXE') or ''
        return exe_name + exe_ext

    def get_exe_fullpath(self, exe, build_dir=None):
        """Return the full build path of *exe* inside *build_dir*
        (defaults to self.build_exe), honoring package and dest_dir."""
        build_dir = build_dir or self.build_exe
        package_dir = (exe.package or '').split('.')
        dest_dir = convert_path(exe.dest_dir or '')
        output_dir = os.path.join(build_dir, *package_dir+[dest_dir])
        exe_filename = self.get_exe_filename(exe.name)
        return os.path.join(output_dir, exe_filename)

    def config_executable (self, exe):
        """Delegate per-executable configuration to build_ext."""
        build_ext.config_extension(self, exe)

    def build_executable (self, exe):
        """Compile and link a single Executable (used as the aliased
        build_extension hook)."""
        sources = list(exe.sources)
        depends = list(exe.depends)
        exe_fullpath = self.get_exe_fullpath(exe)
        depends = sources + depends
        if not (self.force or
                dep_util.newer_group(depends, exe_fullpath, 'newer')):
            log.debug("skipping '%s' executable (up-to-date)", exe.name)
            return
        self.config_executable(exe)
        log.info("building '%s' executable", exe.name)
        # Next, compile the source code to object files.
        # XXX not honouring 'define_macros' or 'undef_macros' -- the
        # CCompiler API needs to change to accommodate this, and I
        # want to do one thing at a time!
        macros = exe.define_macros[:]
        for undef in exe.undef_macros:
            # A 1-tuple marks the macro as an undefine for the compiler.
            macros.append((undef,))
        # Two possible sources for extra compiler arguments:
        # - 'extra_compile_args' in Extension object
        # - CFLAGS environment variable (not particularly
        # elegant, but people seem to expect it and I
        # guess it's useful)
        # The environment variable should take precedence, and
        # any sensible compiler will give precedence to later
        # command line args. Hence we combine them in order:
        extra_args = exe.extra_compile_args[:]
        objects = self.compiler.compile(
            sources,
            output_dir=self.build_temp,
            macros=macros,
            include_dirs=exe.include_dirs,
            debug=self.debug,
            extra_postargs=extra_args,
            depends=exe.depends)
        self._built_objects = objects[:]
        # Now link the object files together into a "shared object" --
        # of course, first we have to figure out all the other things
        # that go into the mix.
        if exe.extra_objects:
            objects.extend(exe.extra_objects)
        extra_args = exe.extra_link_args[:]
        # Get special linker flags for building a executable with
        # bundled Python library, also fix location of needed
        # python.exp file on AIX
        ldflags = sysconfig.get_config_var('PY_LDFLAGS') or ''
        linkshared = sysconfig.get_config_var('LINKFORSHARED') or ''
        linkshared = linkshared.replace('-Xlinker ', '-Wl,')
        if sys.platform == 'darwin': # fix wrong framework paths
            fwkprefix = sysconfig.get_config_var('PYTHONFRAMEWORKPREFIX')
            fwkdir = sysconfig.get_config_var('PYTHONFRAMEWORKDIR')
            if fwkprefix and fwkdir and fwkdir != 'no-framework':
                for flag in shlex.split(linkshared):
                    if flag.startswith(fwkdir):
                        fwkpath = os.path.join(fwkprefix, flag)
                        linkshared = linkshared.replace(flag, fwkpath)
        if sys.platform.startswith('aix'):
            python_lib = sysconfig.get_python_lib(standard_lib=1)
            python_exp = os.path.join(python_lib, 'config', 'python.exp')
            linkshared = linkshared.replace('Modules/python.exp', python_exp)
        # Detect target language, if not provided
        language = exe.language or self.compiler.detect_language(sources)
        self.compiler.link(
            self.compiler.EXECUTABLE,
            objects, exe_fullpath,
            output_dir=None,
            libraries=self.get_libraries(exe),
            library_dirs=exe.library_dirs,
            runtime_library_dirs=exe.runtime_library_dirs,
            extra_preargs=shlex.split(ldflags) + shlex.split(linkshared),
            extra_postargs=extra_args,
            debug=self.debug,
            target_lang=language)

    def get_outputs (self):
        """Return the full paths of all executables this command builds."""
        outputs = []
        for exe in self.executables:
            outputs.append(self.get_exe_fullpath(exe))
        return outputs
class install(cmd_install.install):
    """'install' command extended to know about executable components."""

    def run(self):
        cmd_install.install.run(self)

    def has_lib (self):
        # Predicate used by sub_commands: report libraries only when
        # executables are also present.
        return (cmd_install.install.has_lib(self) and
                self.has_exe())

    def has_exe (self):
        return self.distribution.has_executables()

    # Append an 'install_exe' sub-command guarded by has_exe ...
    sub_commands = \
        cmd_install.install.sub_commands[:] + \
        [('install_exe', has_exe)]

    # XXX disable install_exe subcommand !!!
    # ... and immediately remove it again, keeping the hook easy to
    # re-enable later.
    del sub_commands[-1]
class install_lib(cmd_install_lib.install_lib):
    """'install_lib' extended to also report files produced by the
    'build_clib' and 'build_exe' commands among its outputs."""

    def get_outputs(self):
        """Return installed files, including clib/executable outputs
        that actually exist on disk."""
        outputs = cmd_install_lib.install_lib.get_outputs(self)
        for (build_cmd, build_dir) in (('build_clib', 'build_lib'),
                                       ('build_exe', 'build_exe')):
            outs = self._mutate_outputs(1, build_cmd, build_dir,
                                        self.install_dir)
            # Only report files that were really produced.  (The previous
            # version also fetched build_cmd.get_outputs() into an unused
            # local -- dead code, removed.)
            for out in outs:
                if os.path.exists(out):
                    outputs.append(out)
        return outputs
class install_data(cmd_install_data.install_data):
    """'install_data' that installs data files relative to install_lib
    (instead of the default installation prefix)."""

    def finalize_options (self):
        # Map the 'install' command's install_lib -- not the usual data
        # prefix -- onto install_dir so data files land in the package tree.
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   ('root', 'root'),
                                   ('force', 'force'),
                                   )
class install_exe(cmd_install_lib.install_lib):
    """Install binary executable components built by 'build_exe' into the
    scripts installation directory."""

    description = "install binary executable components"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
        ('build-dir=','b', "build directory (where to install from)"),
        ('force', 'f', "force installation (overwrite existing files)"),
        ('skip-build', None, "skip the build steps"),
        ]

    boolean_options = ['force', 'skip-build']
    negative_opt = { }

    def initialize_options (self):
        self.install_dir = None
        self.build_dir = None
        self.force = 0
        self.skip_build = None

    def finalize_options (self):
        # Source directory comes from 'build_exe'; destination and flags
        # from the top-level 'install' command (executables are installed
        # alongside scripts).
        self.set_undefined_options('build_exe',
                                   ('build_exe', 'build_dir'))
        self.set_undefined_options('install',
                                   ('force', 'force'),
                                   ('skip_build', 'skip_build'),
                                   ('install_scripts', 'install_dir'))

    def run (self):
        self.build()
        self.install()

    def build (self):
        """Run 'build_exe' first unless --skip-build was given."""
        if not self.skip_build:
            if self.distribution.has_executables():
                self.run_command('build_exe')

    def install (self):
        """Copy each built executable into install_dir, recording the
        created files in self.outfiles."""
        self.outfiles = []
        if self.distribution.has_executables():
            build_exe = self.get_finalized_command('build_exe')
            for exe in build_exe.executables:
                exe_fullpath = build_exe.get_exe_fullpath(exe)
                exe_filename = os.path.basename(exe_fullpath)
                if exe_filename.startswith("python-") and os.name == 'posix':
                    # Version the name on POSIX, e.g.
                    # 'python-foo' -> 'python2.7-foo'.
                    install_name = exe_filename.replace(
                        "python-", "python%d.%d-" % sys.version_info[:2])
                    # NOTE(review): both branches set link=None, so no
                    # link is ever created -- looks intentionally disabled.
                    link = None
                else:
                    install_name = exe_filename
                    link = None
                source = exe_fullpath
                target = os.path.join(self.install_dir, install_name)
                self.mkpath(self.install_dir)
                out, done = self.copy_file(source, target, link=link)
                self.outfiles.append(out)

    def get_outputs (self):
        # Files created by install().
        return self.outfiles

    def get_inputs (self):
        # The built executables this command would copy from.
        inputs = []
        if self.distribution.has_executables():
            build_exe = self.get_finalized_command('build_exe')
            inputs.extend(build_exe.get_outputs())
        return inputs
class clean(cmd_clean.clean):
    """'clean' command extended to also remove the build_exe directory
    and (with --all) the project's egg-info directory."""

    description = "clean up temporary files from 'build' command"

    user_options = \
        cmd_clean.clean.user_options[:2] + [
        ('build-exe=', None,
         "build directory for executable components "
         "(default: 'build_exe.build-exe')"),
        ] + cmd_clean.clean.user_options[2:]

    def initialize_options(self):
        cmd_clean.clean.initialize_options(self)
        self.build_exe = None

    def finalize_options(self):
        cmd_clean.clean.finalize_options(self)
        self.set_undefined_options('build_exe',
                                   ('build_exe', 'build_exe'))

    def run(self):
        from distutils.dir_util import remove_tree

        # remove the build/temp.<plat> directory
        # (unless it's already gone)
        if os.path.exists(self.build_temp):
            remove_tree(self.build_temp, dry_run=self.dry_run)
        else:
            log.debug("'%s' does not exist -- can't clean it",
                      self.build_temp)

        if self.all:
            # remove build directories (including our build_exe)
            for directory in (self.build_lib,
                              self.build_exe,
                              self.build_scripts,
                              self.bdist_base,
                              ):
                if os.path.exists(directory):
                    remove_tree(directory, dry_run=self.dry_run)
                else:
                    log.debug("'%s' does not exist -- can't clean it",
                              directory)

        # just for the heck of it, try to remove the base build directory:
        # we might have emptied it right now, but if not we don't care
        if not self.dry_run:
            try:
                os.rmdir(self.build_base)
                log.info("removing '%s'", self.build_base)
            except OSError:
                pass

        if self.all:
            # remove the <package>.egg_info directory
            try:
                egg_info = self.get_finalized_command('egg_info').egg_info
                if os.path.exists(egg_info):
                    remove_tree(egg_info, dry_run=self.dry_run)
                else:
                    log.debug("'%s' does not exist -- can't clean it",
                              egg_info)
            except DistutilsError:
                pass
# -----------------------------------------------------------------------------

if setuptools:
    try:
        from setuptools.command import egg_info as mod_egg_info
        _FileList = mod_egg_info.FileList
        class FileList(_FileList):
            def process_template_line(self, line):
                # Silence warnings emitted while processing MANIFEST
                # template lines; always restore the previous threshold.
                level = log.set_threshold(log.ERROR)
                try:
                    _FileList.process_template_line(self, line)
                finally:
                    log.set_threshold(level)
        mod_egg_info.FileList = FileList
    except Exception:
        # Best-effort monkeypatch: ignore any failure.  Fixed: the previous
        # bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
        pass
# -----------------------------------------------------------------------------
| |
#
# The Python Imaging Library
# $Id$
#
# JPEG2000 file handling
#
# History:
# 2014-03-12 ajh Created
#
# Copyright (c) 2014 Coriolis Systems Limited
# Copyright (c) 2014 Alastair Houghton
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
from PIL import Image, ImageFile, _binary
import struct
import os
import io
def _parse_codestream(fp):
"""Parse the JPEG 2000 codestream to extract the size and component
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
hdr = fp.read(2)
lsiz = struct.unpack('>H', hdr)[0]
siz = hdr + fp.read(lsiz - 2)
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \
xtosiz, ytosiz, csiz \
= struct.unpack('>HHIIIIIIIIH', siz[:38])
ssiz = [None]*csiz
xrsiz = [None]*csiz
yrsiz = [None]*csiz
for i in range(csiz):
ssiz[i], xrsiz[i], yrsiz[i] \
= struct.unpack('>BBB', siz[36 + 3 * i:39 + 3 * i])
size = (xsiz - xosiz, ysiz - yosiz)
if csiz == 1:
mode = 'L'
elif csiz == 2:
mode = 'LA'
elif csiz == 3:
mode = 'RGB'
elif csiz == 4:
mode == 'RGBA'
else:
mode = None
return (size, mode)
def _parse_jp2_header(fp):
"""Parse the JP2 header box to extract size, component count and
color space information, returning a PIL (size, mode) tuple."""
# Find the JP2 header box
header = None
while True:
lbox, tbox = struct.unpack('>I4s', fp.read(8))
if lbox == 1:
lbox = struct.unpack('>Q', fp.read(8))[0]
hlen = 16
else:
hlen = 8
if tbox == b'jp2h':
header = fp.read(lbox - hlen)
break
else:
fp.seek(lbox - hlen, os.SEEK_CUR)
if header is None:
raise SyntaxError('could not find JP2 header')
size = None
mode = None
hio = io.BytesIO(header)
while True:
lbox, tbox = struct.unpack('>I4s', hio.read(8))
if lbox == 1:
lbox = struct.unpack('>Q', hio.read(8))[0]
hlen = 16
else:
hlen = 8
content = hio.read(lbox - hlen)
if tbox == b'ihdr':
height, width, nc, bpc, c, unkc, ipr \
= struct.unpack('>IIHBBBB', content)
size = (width, height)
if unkc:
if nc == 1:
mode = 'L'
elif nc == 2:
mode = 'LA'
elif nc == 3:
mode = 'RGB'
elif nc == 4:
mode = 'RGBA'
break
elif tbox == b'colr':
meth, prec, approx = struct.unpack('>BBB', content[:3])
if meth == 1:
cs = struct.unpack('>I', content[3:7])[0]
if cs == 16: # sRGB
if nc == 3:
mode = 'RGB'
elif nc == 4:
mode = 'RGBA'
break
elif cs == 17: # grayscale
if nc == 1:
mode = 'L'
elif nc == 2:
mode = 'LA'
break
elif cs == 18: # sYCC
if nc == 3:
mode = 'RGB'
elif nc == 4:
mode == 'RGBA'
break
return (size, mode)
##
# Image plugin for JPEG2000 images.

class Jpeg2KImageFile(ImageFile.ImageFile):
    """ImageFile subclass decoding raw JPEG 2000 codestreams (.j2k)
    and JP2 container files (.jp2)."""

    format = "JPEG2000"
    format_description = "JPEG 2000 (ISO 15444)"

    def _open(self):
        sig = self.fp.read(4)
        if sig == b'\xff\x4f\xff\x51':
            # Raw codestream: starts with the SOC/SIZ marker pair.
            self.codec = "j2k"
            self.size, self.mode = _parse_codestream(self.fp)
        else:
            sig = sig + self.fp.read(8)
            # JP2 signature box ('jP  ' -- two spaces -- per ISO 15444-1).
            if sig == b'\x00\x00\x00\x0cjP  \x0d\x0a\x87\x0a':
                self.codec = "jp2"
                self.size, self.mode = _parse_jp2_header(self.fp)
            else:
                raise SyntaxError('not a JPEG 2000 file')

        if self.size is None or self.mode is None:
            raise SyntaxError('unable to determine size/mode')

        self.reduce = 0
        self.layers = 0

        fd = -1
        if hasattr(self.fp, "fileno"):
            try:
                fd = self.fp.fileno()
            except Exception:
                # Fixed: bare 'except:' also swallowed KeyboardInterrupt.
                fd = -1

        self.tile = [('jpeg2k', (0, 0) + self.size, 0,
                      (self.codec, self.reduce, self.layers, fd))]

    def load(self):
        if self.reduce:
            # Resolution reduction halves the size per level, rounding.
            power = 1 << self.reduce
            adjust = power >> 1
            self.size = (int((self.size[0] + adjust) / power),
                         int((self.size[1] + adjust) / power))

        if self.tile:
            # Update the reduce and layers settings
            t = self.tile[0]
            t3 = (t[3][0], self.reduce, self.layers, t[3][3])
            self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]

        ImageFile.ImageFile.load(self)
def _accept(prefix):
return (prefix[:4] == b'\xff\x4f\xff\x51'
or prefix[:12] == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a')
# ------------------------------------------------------------
# Save support

def _save(im, fp, filename):
    """Encode *im* to *fp* as a raw codestream ('.j2k' filenames) or a
    JP2 container (anything else), passing encoder options from
    im.encoderinfo through to the 'jpeg2k' encoder via encoderconfig."""
    if filename.endswith('.j2k'):
        kind = 'j2k'
    else:
        kind = 'jp2'

    # Get the keyword arguments
    info = im.encoderinfo

    offset = info.get('offset', None)
    tile_offset = info.get('tile_offset', None)
    tile_size = info.get('tile_size', None)
    quality_mode = info.get('quality_mode', 'rates')
    quality_layers = info.get('quality_layers', None)
    num_resolutions = info.get('num_resolutions', 0)
    cblk_size = info.get('codeblock_size', None)
    precinct_size = info.get('precinct_size', None)
    irreversible = info.get('irreversible', False)
    progression = info.get('progression', 'LRCP')
    cinema_mode = info.get('cinema_mode', 'no')
    fd = -1

    if hasattr(fp, "fileno"):
        try:
            fd = fp.fileno()
        except Exception:
            # Fixed: bare 'except:' also swallowed KeyboardInterrupt.
            fd = -1

    im.encoderconfig = (
        offset,
        tile_offset,
        tile_size,
        quality_mode,
        quality_layers,
        num_resolutions,
        cblk_size,
        precinct_size,
        irreversible,
        progression,
        cinema_mode,
        fd
    )

    ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)])
# ------------------------------------------------------------
# Registry stuff

# Register the plugin with PIL: opener (guarded by _accept), saver,
# recognized file extensions and MIME types.
Image.register_open('JPEG2000', Jpeg2KImageFile, _accept)
Image.register_save('JPEG2000', _save)

Image.register_extension('JPEG2000', '.jp2')
Image.register_extension('JPEG2000', '.j2k')
Image.register_extension('JPEG2000', '.jpc')
Image.register_extension('JPEG2000', '.jpf')
Image.register_extension('JPEG2000', '.jpx')
Image.register_extension('JPEG2000', '.j2c')

Image.register_mime('JPEG2000', 'image/jp2')
Image.register_mime('JPEG2000', 'image/jpx')
| |
# -*- coding: utf-8 -*-
from ...Qt import QtCore, QtGui
from ..Node import Node
from . import functions
from ... import functions as pgfn
from .common import *
import numpy as np
from ... import metaarray as metaarray
class Downsample(CtrlNode):
    """Reduce the sample count by averaging groups of consecutive samples."""
    nodeName = 'Downsample'
    uiTemplate = [
        ('n', 'intSpin', {'min': 1, 'max': 1000000})
    ]

    def processData(self, data):
        # Average every group of 'n' consecutive samples along axis 0.
        factor = self.ctrls['n'].value()
        return functions.downsample(data, factor, axis=0)
class Subsample(CtrlNode):
    """Reduce the sample count by keeping only every Nth sample."""
    nodeName = 'Subsample'
    uiTemplate = [
        ('n', 'intSpin', {'min': 1, 'max': 1000000})
    ]

    def processData(self, data):
        step = self.ctrls['n'].value()
        return data[::step]
class Bessel(CtrlNode):
    """Bessel filter node; the incoming data must carry time values."""
    nodeName = 'BesselFilter'
    uiTemplate = [
        ('band', 'combo', {'values': ['lowpass', 'highpass'], 'index': 0}),
        ('cutoff', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('order', 'intSpin', {'value': 4, 'min': 1, 'max': 16}),
        ('bidir', 'check', {'checked': True})
    ]

    def processData(self, data):
        opts = self.stateGroup.state()
        # Map the UI band selection onto the filter's btype argument.
        btype = 'low' if opts['band'] == 'lowpass' else 'high'
        return functions.besselFilter(data, bidir=opts['bidir'], btype=btype,
                                      cutoff=opts['cutoff'], order=opts['order'])
class Butterworth(CtrlNode):
    """Butterworth filter node with pass/stop band specification."""
    nodeName = 'ButterworthFilter'
    uiTemplate = [
        ('band', 'combo', {'values': ['lowpass', 'highpass'], 'index': 0}),
        ('wPass', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('wStop', 'spin', {'value': 2000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('gPass', 'spin', {'value': 2.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}),
        ('gStop', 'spin', {'value': 20.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}),
        ('bidir', 'check', {'checked': True})
    ]

    def processData(self, data):
        opts = self.stateGroup.state()
        # Map the UI band selection onto the filter's btype argument.
        btype = 'low' if opts['band'] == 'lowpass' else 'high'
        return functions.butterworthFilter(data, bidir=opts['bidir'], btype=btype,
                                           wPass=opts['wPass'], wStop=opts['wStop'],
                                           gPass=opts['gPass'], gStop=opts['gStop'])
class ButterworthNotch(CtrlNode):
    """Notch filter built from the sum of a Butterworth low-pass and a
    Butterworth high-pass response."""
    nodeName = 'ButterworthNotchFilter'
    uiTemplate = [
        ('low_wPass', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('low_wStop', 'spin', {'value': 2000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('low_gPass', 'spin', {'value': 2.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}),
        ('low_gStop', 'spin', {'value': 20.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}),
        ('high_wPass', 'spin', {'value': 3000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('high_wStop', 'spin', {'value': 4000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
        ('high_gPass', 'spin', {'value': 2.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}),
        ('high_gStop', 'spin', {'value': 20.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}),
        ('bidir', 'check', {'checked': True})
    ]

    def processData(self, data):
        opts = self.stateGroup.state()
        lp = functions.butterworthFilter(data, bidir=opts['bidir'], btype='low',
                                         wPass=opts['low_wPass'], wStop=opts['low_wStop'],
                                         gPass=opts['low_gPass'], gStop=opts['low_gStop'])
        hp = functions.butterworthFilter(data, bidir=opts['bidir'], btype='high',
                                         wPass=opts['high_wPass'], wStop=opts['high_wStop'],
                                         gPass=opts['high_gPass'], gStop=opts['high_gStop'])
        # The notch is the sum of the two band responses.
        return lp + hp
class Mean(CtrlNode):
    """Smooth the data with the mean of a sliding window."""
    nodeName = 'MeanFilter'
    uiTemplate = [
        ('n', 'intSpin', {'min': 1, 'max': 1000000})
    ]

    @metaArrayWrapper
    def processData(self, data):
        # Rolling sum divided by the window width == rolling mean.
        width = self.ctrls['n'].value()
        return functions.rollingSum(data, width) / width
class Median(CtrlNode):
    """Smooth the data with the median of a sliding window."""
    nodeName = 'MedianFilter'
    uiTemplate = [
        ('n', 'intSpin', {'min': 1, 'max': 1000000})
    ]

    @metaArrayWrapper
    def processData(self, data):
        try:
            import scipy.ndimage
        except ImportError:
            raise Exception("MedianFilter node requires the package scipy.ndimage.")
        width = self.ctrls['n'].value()
        return scipy.ndimage.median_filter(data, width)
class Mode(CtrlNode):
    """Smooth the data with the histogram-based mode of a sliding window."""
    nodeName = 'ModeFilter'
    uiTemplate = [
        ('window', 'intSpin', {'value': 500, 'min': 1, 'max': 1000000}),
    ]

    @metaArrayWrapper
    def processData(self, data):
        width = self.ctrls['window'].value()
        return functions.modeFilter(data, width)
class Denoise(CtrlNode):
    """Replace anomalous spikes in the data with nearby values."""
    nodeName = 'DenoiseFilter'
    uiTemplate = [
        ('radius', 'intSpin', {'value': 2, 'min': 0, 'max': 1000000}),
        ('threshold', 'doubleSpin', {'value': 4.0, 'min': 0, 'max': 1000})
    ]

    def processData(self, data):
        # Forward radius/threshold straight from the UI state.
        params = self.stateGroup.state()
        return functions.denoise(data, **params)
class Gaussian(CtrlNode):
    """Smooth the data with a Gaussian kernel."""
    nodeName = 'GaussianFilter'
    uiTemplate = [
        ('sigma', 'doubleSpin', {'min': 0, 'max': 1000000})
    ]

    @metaArrayWrapper
    def processData(self, data):
        try:
            import scipy.ndimage  # availability check only
        except ImportError:
            raise Exception("GaussianFilter node requires the package scipy.ndimage.")
        width = self.ctrls['sigma'].value()
        return pgfn.gaussianFilter(data, width)
class Derivative(CtrlNode):
    """Compute the pointwise first difference of the input."""
    nodeName = 'DerivativeFilter'

    def processData(self, data):
        diff = data[1:] - data[:-1]
        if hasattr(data, 'implements') and data.implements('MetaArray'):
            # The difference is one sample shorter; trim the axis values
            # in the metadata to match before rewrapping.
            info = data.infoCopy()
            if 'values' in info[0]:
                info[0]['values'] = info[0]['values'][:-1]
            return metaarray.MetaArray(diff, info=info)
        return diff
class Integral(CtrlNode):
    """Returns the pointwise integral (cumulative sum) of the input"""
    nodeName = 'IntegralFilter'

    @metaArrayWrapper
    def processData(self, data):
        # Fixed: the previous in-place 'data[1:] += data[:-1]' only produced
        # a cumulative sum on old NumPy versions that applied the overlapping
        # slices sequentially; modern NumPy copies overlapping operands and
        # yields a pairwise sum instead.  Compute the cumulative sum
        # explicitly, writing back in place to preserve the input's type.
        data[:] = np.cumsum(np.asarray(data), axis=0)
        return data
class Detrend(CtrlNode):
    """Subtract the best-fit linear trend from the data."""
    nodeName = 'DetrendFilter'

    @metaArrayWrapper
    def processData(self, data):
        try:
            import scipy.signal
        except ImportError:
            raise Exception("DetrendFilter node requires the package scipy.signal.")
        return scipy.signal.detrend(data)
class AdaptiveDetrend(CtrlNode):
    """Remove the baseline from the data while ignoring anomalous events."""
    nodeName = 'AdaptiveDetrend'
    uiTemplate = [
        ('threshold', 'doubleSpin', {'value': 3.0, 'min': 0, 'max': 1000000})
    ]

    def processData(self, data):
        limit = self.ctrls['threshold'].value()
        return functions.adaptiveDetrend(data, threshold=limit)
class HistogramDetrend(CtrlNode):
    """Subtract a baseline estimated as the histogram mode of the
    beginning and end of the data."""
    nodeName = 'HistogramDetrend'
    uiTemplate = [
        ('windowSize', 'intSpin', {'value': 500, 'min': 10, 'max': 1000000, 'suffix': 'pts'}),
        ('numBins', 'intSpin', {'value': 50, 'min': 3, 'max': 1000000}),
        ('offsetOnly', 'check', {'checked': False}),
    ]

    def processData(self, data):
        opts = self.stateGroup.state()
        return functions.histogramDetrend(data, window=opts['windowSize'],
                                          bins=opts['numBins'],
                                          offsetOnly=opts['offsetOnly'])
class RemovePeriodic(CtrlNode):
    """Remove a periodic signal at f0 (and its harmonics) by flattening
    the magnitude of the corresponding FFT bins while preserving phase."""
    nodeName = 'RemovePeriodic'
    uiTemplate = [
        ('f0', 'spin', {'value': 60, 'suffix': 'Hz', 'siPrefix': True, 'min': 0, 'max': None}),
        ('harmonics', 'intSpin', {'value': 30, 'min': 0}),
        ('samples', 'intSpin', {'value': 1, 'min': 1}),
    ]

    def processData(self, data):
        times = data.xvals('Time')
        dt = times[1]-times[0]

        data1 = data.asarray()
        ft = np.fft.fft(data1)

        ## determine frequency resolution of the fft data
        df = 1.0 / (len(data1) * dt)

        ## flatten spikes at f0 and harmonics
        f0 = self.ctrls['f0'].value()
        # Fixed: 'xrange' is Python 2 only and raised NameError on Python 3.
        for i in range(1, self.ctrls['harmonics'].value()+2):
            f = f0 * i # target frequency

            ## determine index range to check for this frequency
            ind1 = int(np.floor(f / df))
            ind2 = int(np.ceil(f / df)) + (self.ctrls['samples'].value()-1)
            if ind1 > len(ft)/2.:
                # beyond the Nyquist bin; nothing more to remove
                break
            # estimate the expected magnitude from the neighboring bins
            mag = (abs(ft[ind1-1]) + abs(ft[ind2+1])) * 0.5
            for j in range(ind1, ind2+1):
                ## Must preserve the phase of each point, otherwise any
                ## transients in the trace might lead to large artifacts.
                phase = np.angle(ft[j])
                re = mag * np.cos(phase)
                im = mag * np.sin(phase)
                ft[j] = re + im*1j
                # mirror into the conjugate negative-frequency bin
                ft[len(ft)-j] = re - im*1j

        data2 = np.fft.ifft(ft).real

        ma = metaarray.MetaArray(data2, info=data.infoCopy())
        return ma
| |
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# Since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident.
try:
from greenlet import getcurrent as get_ident
except ImportError: # noqa
try:
from thread import get_ident # noqa
except ImportError: # noqa
try:
from _thread import get_ident # noqa
except ImportError: # noqa
from dummy_thread import get_ident # noqa
def release_local(local):
    """Releases the contents of the local for the current context.
    This makes it possible to use locals without a manager.

    Example::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    With this function one can release :class:`Local` objects as well
    as :class:`StackLocal` objects.  However it is not possible to
    release data held by proxies that way, one always has to retain
    a reference to the underlying local object in order to be able
    to release it.

    .. versionadded:: 0.6.1
    """
    # Delegate to the object's own release protocol.
    local.__release_local__()
class Local(object):
    """Attribute storage that is private to the current context, keyed
    by the identifier returned from ``__ident_func__`` (thread ident or
    greenlet, depending on availability)."""

    __slots__ = ('__storage__', '__ident_func__')

    def __init__(self):
        # Bypass our own __setattr__, which writes into context storage.
        object.__setattr__(self, '__storage__', {})
        object.__setattr__(self, '__ident_func__', get_ident)

    def __iter__(self):
        # Yields (ident, namespace-dict) pairs for every live context.
        return iter(self.__storage__.items())

    def __call__(self, proxy):
        """Create a proxy for a name."""
        return LocalProxy(self, proxy)

    def __release_local__(self):
        # Drop everything stored for the current context, if anything.
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        ident = self.__ident_func__()
        try:
            return self.__storage__[ident][name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        ident = self.__ident_func__()
        self.__storage__.setdefault(ident, {})[name] = value

    def __delattr__(self, name):
        ident = self.__ident_func__()
        try:
            del self.__storage__[ident][name]
        except KeyError:
            raise AttributeError(name)
class LocalStack(object):
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead.  This is best explained with an example::

        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42

    They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after using.  When the stack is empty it will
    no longer be bound to the current context (and as such released).

    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.

    .. versionadded:: 0.6.1
    """

    def __init__(self):
        # All stack data lives in a context-local 'stack' attribute.
        self._local = Local()

    def __release_local__(self):
        self._local.__release_local__()

    def _get__ident_func__(self):
        return self._local.__ident_func__

    def _set__ident_func__(self, value): # noqa
        # __ident_func__ is a slot on Local, so bypass Local.__setattr__
        # (which would otherwise store into context-local storage).
        object.__setattr__(self._local, '__ident_func__', value)
    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    # Remove the raw accessors from the class namespace; only the
    # property remains visible.
    del _get__ident_func__, _set__ident_func__

    def __call__(self):
        # Return a proxy that always resolves to the current top item.
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        return LocalProxy(_lookup)

    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            # Last item: release the whole context-local so the stack is
            # no longer bound to this context.
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack.  If the stack is empty,
        `None` is returned.
        """
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None

    def __len__(self):
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return 0
        return len(stack)
class LocalManager(object):
    """Local objects cannot manage themselves. For that you need a local
    manager.  You can pass a local manager multiple locals or add them
    later by appending them to ``manager.locals``.  Everytime the manager
    cleans up, it will clean up all the data left in the locals for this
    context.

    The `ident_func` parameter can be added to override the default ident
    function for the wrapped locals.

    .. versionchanged:: 0.6.1
       Instead of a manager the :func:`release_local` function can be used
       as well.

    .. versionchanged:: 0.7
       `ident_func` was added.
    """

    def __init__(self, locals=None, ident_func=None):
        # Normalize the 'locals' argument to a plain list.
        if locals is None:
            managed = []
        elif isinstance(locals, Local):
            managed = [locals]
        else:
            managed = list(locals)
        self.locals = managed
        if ident_func is None:
            self.ident_func = get_ident
        else:
            # Propagate the custom ident function into every wrapped local
            # (bypassing Local.__setattr__, which writes into storage).
            self.ident_func = ident_func
            for local in managed:
                object.__setattr__(local, '__ident_func__', ident_func)

    def get_ident(self):
        """Return the context identifier the local objects use internally for
        this context.  You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.

        .. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
           will then be propagated to all the locals passed to the
           constructor.
        """
        return self.ident_func()

    def cleanup(self):
        """Manually clean up the data in the locals for this context.  Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)

    def __repr__(self):
        return '<%s storages: %d>' % (
            self.__class__.__name__,
            len(self.locals)
        )
class LocalProxy(object):
    """Acts as a proxy for a werkzeug local.  Forwards all operations to
    a proxied object.  The only operations not supported for forwarding
    are right handed operands and any kind of assignment.

    Example usage::

        from werkzeug.local import Local
        l = Local()

        # these are proxies
        request = l('request')
        user = l('user')


        from werkzeug.local import LocalStack
        _response_local = LocalStack()

        # this is a proxy
        response = _response_local()

    Whenever something is bound to l.user / l.request the proxy objects
    will forward all operations.  If no object is bound a :exc:`RuntimeError`
    will be raised.

    To create proxies to :class:`Local` or :class:`LocalStack` objects,
    call the object as shown above.  If you want to have a proxy to an
    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
    a function to the :class:`LocalProxy` constructor::

        session = LocalProxy(lambda: get_current_request().session)

    .. versionchanged:: 0.6.1
       The class can be instanciated with a callable as well now.
    """
    # '__local' is name-mangled to '_LocalProxy__local'; '__dict__' is a
    # slot so the property below can forward it to the wrapped object.
    __slots__ = ('__local', '__dict__', '__name__')

    def __init__(self, local, name=None):
        # Store via object.__setattr__ so the forwarding __setattr__ below
        # never intercepts access to the proxy's own state.
        object.__setattr__(self, '_LocalProxy__local', local)
        object.__setattr__(self, '__name__', name)

    def _get_current_object(self):
        """Return the current object.  This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        if not hasattr(self.__local, '__release_local__'):
            # Not a Local/LocalStack: assume a zero-argument callable.
            return self.__local()
        try:
            return getattr(self.__local, self.__name__)
        except AttributeError:
            raise RuntimeError('no object bound to %s' % self.__name__)

    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError:
            raise AttributeError('__dict__')

    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError:
            # Unbound proxies still repr() without raising.
            return '<%s unbound>' % self.__class__.__name__
        return repr(obj)

    def __nonzero__(self):
        # Python 2 truth protocol; unbound proxies are falsy.
        try:
            return bool(self._get_current_object())
        except RuntimeError:
            return False

    def __unicode__(self):
        # Python 2 text protocol.
        try:
            return unicode(self._get_current_object())
        except RuntimeError:
            return repr(self)

    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError:
            return []

    def __getattr__(self, name):
        if name == '__members__':
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)

    def __setitem__(self, key, value):
        self._get_current_object()[key] = value

    def __delitem__(self, key):
        del self._get_current_object()[key]

    def __setslice__(self, i, j, seq):
        # Python 2 slice protocol.
        self._get_current_object()[i:j] = seq

    def __delslice__(self, i, j):
        # Python 2 slice protocol.
        del self._get_current_object()[i:j]

    # Forward every remaining special method to the wrapped object.
    # These must be defined on the class (not via __getattr__) because
    # special-method lookup bypasses the instance.  Entries such as
    # __cmp__/__div__/__long__/__oct__/__hex__/__coerce__/__getslice__
    # are Python 2 protocol methods; they are ignored on Python 3.
    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __long__ = lambda x: long(x._get_current_object())
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
| |
"""Configuration used when connecting to a device.
A configuration describes a device, e.g. it's name, IP address and credentials. It is
possible to manually create a configuration, but generally scanning for devices will
provide configurations for you.
"""
from copy import deepcopy
from ipaddress import IPv4Address
from typing import Dict, List, Mapping, Optional
from pyatv.const import PairingRequirement, Protocol
from pyatv.interface import BaseConfig, BaseService, DeviceInfo
from pyatv.support import deprecated
class AppleTV(BaseConfig):
    """Representation of a device configuration.

    An instance of this class represents a single device. A device can have
    several services depending on the protocols it supports, e.g. DMAP or
    AirPlay.
    """

    def __init__(
        self,
        address: IPv4Address,
        name: str,
        deep_sleep: bool = False,
        properties: Optional[Mapping[str, Mapping[str, str]]] = None,
        device_info: Optional[DeviceInfo] = None,
    ) -> None:
        """Initialize a new AppleTV."""
        super().__init__(properties or {})
        # At most one service per protocol; duplicates are merged by
        # add_service below.
        self._services: Dict[Protocol, BaseService] = {}
        self._address = address
        self._name = name
        self._deep_sleep = deep_sleep
        self._device_info = device_info or DeviceInfo({})

    @property
    def address(self) -> IPv4Address:
        """IP address of device."""
        return self._address

    @property
    def name(self) -> str:
        """Name of device."""
        return self._name

    @property
    def deep_sleep(self) -> bool:
        """If device is in deep sleep."""
        return self._deep_sleep

    @property
    def device_info(self) -> DeviceInfo:
        """Return general device information."""
        return self._device_info

    @property
    def services(self) -> List[BaseService]:
        """Return all supported services."""
        return list(self._services.values())

    def add_service(self, service: BaseService) -> None:
        """Add a new service.

        If the service already exists, it will be merged.
        """
        known = self._services.get(service.protocol)
        if known is None:
            self._services[service.protocol] = service
        else:
            known.merge(service)

    def get_service(self, protocol: Protocol) -> Optional[BaseService]:
        """Look up a service based on protocol.

        If a service with the specified protocol is not available, None is
        returned.
        """
        return self._services.get(protocol)

    def __deepcopy__(self, memo) -> "BaseConfig":
        """Return deep-copy of instance."""
        clone = AppleTV(
            self._address,
            self._name,
            self._deep_sleep,
            self._properties,
            self._device_info,
        )
        for service in self.services:
            clone.add_service(deepcopy(service))
        return clone
class ManualService(BaseService):
    """Service used when manually creating and adding a service."""

    def __init__(
        self,
        identifier: Optional[str],
        protocol: Protocol,
        port: int,
        properties: Optional[Mapping[str, str]],
        credentials: Optional[str] = None,
        password: Optional[str] = None,
        requires_password: bool = False,
        pairing_requirement: PairingRequirement = PairingRequirement.Unsupported,
        enabled: bool = True,
    ) -> None:
        """Initialize a new ManualService."""
        super().__init__(
            identifier, protocol, port, properties, credentials, password, enabled
        )
        # State not handled by BaseService is kept locally.
        self._pairing_requirement = pairing_requirement
        self._requires_password = requires_password

    @property
    def pairing(self) -> PairingRequirement:
        """Return if pairing is required by service."""
        return self._pairing_requirement

    @property
    def requires_password(self) -> bool:
        """Return if a password is required to access service."""
        return self._requires_password

    def __deepcopy__(self, memo) -> "BaseService":
        """Return deep-copy of instance."""
        return ManualService(
            self.identifier,
            self.protocol,
            self.port,
            self.properties,
            self.credentials,
            self.password,
            self.requires_password,
            self.pairing,
            self.enabled,
        )
# pylint: disable=too-few-public-methods
class DmapService(ManualService):
    """Representation of a DMAP service.

    **DEPRECATED: Use `pyatv.conf.ManualService` instead.**
    """

    @deprecated
    def __init__(
        self,
        identifier: Optional[str],
        credentials: Optional[str],
        port: int = 3689,
        properties: Optional[Mapping[str, str]] = None,
    ) -> None:
        """Initialize a new DmapService."""
        # Thin wrapper: pin the protocol to DMAP and delegate the rest.
        super().__init__(
            identifier, Protocol.DMAP, port, properties, credentials=credentials
        )
# pylint: disable=too-few-public-methods
class MrpService(ManualService):
    """Representation of a MediaRemote Protocol (MRP) service.

    **DEPRECATED: Use `pyatv.conf.ManualService` instead.**
    """

    @deprecated
    def __init__(
        self,
        identifier: Optional[str],
        port: int,
        credentials: Optional[str] = None,
        properties: Optional[Mapping[str, str]] = None,
    ) -> None:
        """Initialize a new MrpService."""
        # Thin wrapper: pin the protocol to MRP and delegate the rest.
        super().__init__(
            identifier, Protocol.MRP, port, properties, credentials=credentials
        )
# pylint: disable=too-few-public-methods
class AirPlayService(ManualService):
    """Representation of an AirPlay service.

    **DEPRECATED: Use `pyatv.conf.ManualService` instead.**
    """

    @deprecated
    def __init__(
        self,
        identifier: Optional[str],
        port: int = 7000,
        credentials: Optional[str] = None,
        properties: Optional[Mapping[str, str]] = None,
    ) -> None:
        """Initialize a new AirPlayService."""
        # Thin wrapper: pin the protocol to AirPlay and delegate the rest.
        super().__init__(
            identifier, Protocol.AirPlay, port, properties, credentials=credentials
        )
# pylint: disable=too-few-public-methods
class CompanionService(ManualService):
    """Representation of a Companion link service.

    **DEPRECATED: Use `pyatv.conf.ManualService` instead.**
    """

    @deprecated
    def __init__(
        self,
        port: int,
        credentials: Optional[str] = None,
        properties: Optional[Mapping[str, str]] = None,
    ) -> None:
        """Initialize a new CompanionService."""
        # Companion services carry no identifier, hence None is passed.
        super().__init__(None, Protocol.Companion, port, properties, credentials)
# pylint: disable=too-few-public-methods
class RaopService(ManualService):
    """Representation of an RAOP service.

    **DEPRECATED: Use `pyatv.conf.ManualService` instead.**
    """

    @deprecated
    def __init__(
        self,
        identifier: Optional[str],
        port: int = 7000,
        credentials: Optional[str] = None,
        password: Optional[str] = None,
        properties: Optional[Mapping[str, str]] = None,
    ) -> None:
        """Initialize a new RaopService."""
        # Thin wrapper: pin the protocol to RAOP and delegate the rest.
        super().__init__(
            identifier,
            Protocol.RAOP,
            port,
            properties,
            credentials=credentials,
            password=password,
        )
| |
# -*- coding: utf-8 -*-
from __future__ import division
import os
import re
import importlib
import logging
from copy import deepcopy
from sentence import Sentence
from data_prep import DataPrep
from file_io import fileRead, fileWrite
__version__ = '1.1'
logger = logging.getLogger('DATAPOOL')
class DataPool():
    """
    Data object that holds all sentences (dependency trees) and provides
    an interface for loading data from the disk and retrieving them using
    an index.

    Data are classified into sections when stored on disk, but we do not
    preserve such structural information: all sentences are loaded and
    "flattened" into a single list (``data_list``).

    The instance maintains a ``current_index`` variable, used to locate the
    last sentence object that was read.  ``get_next_data()`` advances it by
    one and ``has_next_data()`` tests it against the total number of
    sentences.  The index persists across those calls and is only reset to
    its initial value of -1 by ``reset_index()`` / ``reset_all()``.
    """

    def __init__(self,
                 fgen,
                 data_format,
                 data_regex=None,
                 data_path=None,
                 textString=None,
                 prep_path='data/prep/',
                 shards=1,
                 sparkContext=None,
                 hadoop=False):
        """
        Initialize the data set.

        :param fgen: feature generator class, or the name of a module in
            the ``feature`` package exposing a ``FeatureGenerator`` attribute
        :param data_format: data-format instance, or the name of a module in
            ``data.data_format`` exposing a ``DataFormat`` class
        :param data_regex: the sections to be used.
            A regular expression that indicates which sections to be used, e.g.
            (0[0-9])|(1[0-9])|(2[0-1])/.*tab
        :type data_regex: str
        :param data_path: the relative or absolute path to the 'penn-wsj-deps'
            folder (including "penn-wsj-deps")
        :type data_path: str
        :param textString: when given, sentences are additionally parsed
            directly from this string
        :param hadoop: when True, data is staged and read through HDFS/Spark
        """
        # fgen / data_format may arrive either as ready objects or as
        # importable module names (py2: basestring covers str and unicode).
        if isinstance(fgen, basestring):
            self.fgen = importlib.import_module('feature.' + fgen).FeatureGenerator
        else:
            self.fgen = fgen
        if isinstance(data_format, basestring):
            self.data_format = importlib.import_module(
                'data.data_format.' + data_format).DataFormat(self.fgen)
        else:
            self.data_format = data_format

        self.hadoop = hadoop
        self.reset_all()

        if textString is not None:
            self.load_stringtext(textString)
        if data_regex is not None:
            self.load(data_path=data_path,
                      data_regex=data_regex,
                      shards=shards,
                      prep_path=prep_path,
                      sparkContext=sparkContext)

    def load(self,
             data_path,
             data_regex,
             shards,
             prep_path,
             sparkContext):
        """
        For each section matched by ``data_regex``, iterate through all files
        under that section directory and load the content of each individual
        file into this instance.

        This method should be called after the section regex has been
        initialized and before any get_data method is called.
        """
        logger.info("Loading data...")
        self.dataPrep = DataPrep(dataURI=data_path,
                                 dataRegex=data_regex,
                                 shardNum=shards,
                                 targetPath=prep_path,
                                 sparkContext=sparkContext)
        # Stage the raw files either on HDFS or on the local filesystem.
        if self.hadoop is True:
            self.dataPrep.loadHadoop()
        else:
            self.dataPrep.loadLocal()
        # Parse the staged data into data_list.  In yarn/hadoop mode the
        # local path is not populated, so read back through Spark instead.
        if self.hadoop is False:
            for dirName, subdirList, fileList in os.walk(self.dataPrep.localPath()):
                for file_name in fileList:
                    file_path = "%s/%s" % (str(dirName), str(file_name))
                    self.data_list += self.data_format.get_data_from_file(file_path)
        else:
            aRdd = sparkContext.textFile(self.dataPrep.hadoopPath()).cache()
            tmp = aRdd.collect()
            tmpStr = ''.join(str(e) + "\n" for e in tmp)
            self.load_stringtext(textString=tmpStr)
        logger.info("Data loaded")

    def load_stringtext(self, textString):
        """Parse sentences from a raw text string and append them."""
        self.data_list += self.data_format.load_stringtext(textString)

    def loadedPath(self):
        """
        Return the path the data was loaded from (HDFS path in hadoop mode,
        local path otherwise).

        :raises RuntimeError: if no data has been loaded yet
        """
        # Fix: before load() runs, the ``dataPrep`` attribute does not exist
        # at all (reset_all() never sets it), so the original ``if
        # self.dataPrep`` raised AttributeError instead of the intended
        # RuntimeError.  getattr() makes the guard cover both cases.
        if not getattr(self, 'dataPrep', None):
            raise RuntimeError("DATAPOOL [ERROR]: Data has not been loaded by DataPrep, cannot retrieve data path.")
        if self.hadoop is True:
            return self.dataPrep.hadoopPath()
        return self.dataPrep.localPath()

    def __add__(self, another_data_pool):
        """
        Return a new DataPool holding the sentences of both operands.

        Adding ``None`` returns a deep copy of ``self``.  NOTE(review): no
        check is made that both pools use the same fgen/data_format.
        """
        if another_data_pool is None:
            return deepcopy(self)
        newDataPool = deepcopy(self)
        newDataPool.data_list = newDataPool.data_list + another_data_pool.data_list
        newDataPool.reset_index()
        return newDataPool

    def export(self, fileURI, sparkContext=None):
        """Write the pool back to ``fileURI`` using the data format."""
        self.data_format.export_to_file(self, fileURI, sparkContext)

    def reset_all(self):
        """
        Reset the index variables and the data list.

        Restores the instance to a state when no sentence has been read.
        """
        self.reset_index()
        self.data_list = []

    def reset_index(self):
        """
        Reset the index variable to the very beginning of the
        sentence list.
        """
        self.current_index = -1

    def has_next_data(self):
        """
        Return True if there is still an unread sentence.

        This call does not advance the data pointer; a call to
        get_next_data() will do the job.

        :return: False if we have reached the end of data_list,
            True otherwise
        """
        return 0 <= self.current_index + 1 < len(self.data_list)

    def get_next_data(self):
        """
        Return the next sentence object, which was previously read
        from disk files.

        :raises IndexError: when the pool is exhausted; call
            has_next_data() first to avoid this.
        """
        if self.has_next_data():
            self.current_index += 1
            # Log progress every 1000 entries supplied.
            if self.current_index % 1000 == 0:
                logger.debug("Data finishing %.2f%% ..." %
                             (100 * self.current_index / len(self.data_list), ))
            return self.data_list[self.current_index]
        raise IndexError("Run out of data while calling get_next_data()")

    def get_sent_num(self):
        """Return the total number of sentences held in the pool."""
        return len(self.data_list)
| |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 David Shah <dave@ds0.me>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import OpenOCDJTAGProgrammer
# IOs ----------------------------------------------------------------------------------------------

# Pin constraints for the board; consumed by the Platform class below.
_io = [
    # Clk / Rst
    ("clk100", 0, Pins("B29"), IOStandard("LVDS")), # [broken on rev1.0 (non diff pair)]
    ("clk12", 0, Pins("B3"), IOStandard("LVCMOS33")),
    ("clkref", 0, Pins("E17"), IOStandard("LVCMOS33")),
    # Leds
    ("user_led", 0, Pins("C26"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 1, Pins("D26"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 2, Pins("A28"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 3, Pins("A29"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 4, Pins("A30"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 5, Pins("AK29"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 6, Pins("AH32"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 7, Pins("AH30"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 8, Pins("AH28"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 9, Pins("AG30"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 10, Pins("AG29"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    ("user_led", 11, Pins("AK30"), IOStandard("LVCMOS33"), Misc("PULLMODE=NONE")),
    # Buttons
    ("user_btn", 0, Pins("Y32"), IOStandard("SSTL135_I")),
    ("user_btn", 1, Pins("W31"), IOStandard("SSTL135_I")),
    ("user_btn", 2, Pins("AD30"), IOStandard("SSTL135_I")),
    ("user_btn", 3, Pins("AD29"), IOStandard("SSTL135_I")),
    # Switches
    ("user_dip", 0, Pins("AE31"), IOStandard("SSTL135_I")),
    ("user_dip", 1, Pins("AE32"), IOStandard("SSTL135_I")),
    ("user_dip", 2, Pins("AD32"), IOStandard("SSTL135_I")),
    ("user_dip", 3, Pins("AC32"), IOStandard("SSTL135_I")),
    ("user_dip", 4, Pins("AB32"), IOStandard("SSTL135_I")),
    ("user_dip", 5, Pins("AB31"), IOStandard("SSTL135_I")),
    ("user_dip", 6, Pins("AC31"), IOStandard("SSTL135_I")),
    ("user_dip", 7, Pins("AC30"), IOStandard("SSTL135_I")),
    # Serial
    ("serial", 0,
        Subsignal("rx", Pins("AM28"), IOStandard("LVCMOS33")),
        Subsignal("tx", Pins("AL28"), IOStandard("LVCMOS33")),
    ),
    # USB FIFO
    # NOTE(review): dq shares pins AM28/AL28 with the serial resource above
    # — presumably the two resources are mutually exclusive; confirm.
    ("usb_fifo", 0,
        Subsignal("dq", Pins("AM28 AL28 AM29 AK28 AK32 AM30 AJ32 AL30"), IOStandard("LVCMOS33")),
        Subsignal("txe_n", Pins("AM31"), IOStandard("LVCMOS33")),
        Subsignal("rxf_n", Pins("AJ31"), IOStandard("LVCMOS33")),
        Subsignal("rd_n", Pins("AL32"), IOStandard("LVCMOS33")),
        Subsignal("wr_n", Pins("AG28"), IOStandard("LVCMOS33")),
        Subsignal("siwu_n", Pins("AJ28"), IOStandard("LVCMOS33")),
    ),
    # DDR3 SDRAM
    ("dram_vtt_en", 0, Pins("E25"), IOStandard("LVCMOS33")),
    ("ddram", 0,
        Subsignal("a", Pins(
            "E30 F28 C32 E29 F32 D30 E32 D29",
            "D32 C31 H32 F31 F29 B32 D31"),
            IOStandard("SSTL135_I")),
        Subsignal("ba", Pins("H31 H30 J30"), IOStandard("SSTL135_I")),
        Subsignal("ras_n", Pins("K31"), IOStandard("SSTL135_I")),
        Subsignal("cas_n", Pins("K30"), IOStandard("SSTL135_I")),
        Subsignal("we_n", Pins("J32"), IOStandard("SSTL135_I")),
        Subsignal("cs_n", Pins("K29"), IOStandard("SSTL135_I")),
        Subsignal("dm", Pins("R26 L27 Y27 U31"), IOStandard("SSTL135_I")),
        Subsignal("dq", Pins(
            " V26 R27 V27 T26 U28 T27 T29 U26",
            " P27 K28 P26 L26 K27 N26 L29 K26",
            "AC27 W28 AC26 Y26 AB26 W29 AD26 Y28",
            " T32 U32 P31 V32 P32 W32 N32 U30"),
            IOStandard("SSTL135_I"),
            Misc("TERMINATION=75")),
        Subsignal("dqs_p", Pins("R29 N30 AB28 R32"), IOStandard("SSTL135D_I"),
            Misc("TERMINATION=OFF"),
            Misc("DIFFRESISTOR=100")),
        Subsignal("clk_p", Pins("L31"), IOStandard("SSTL135D_I")),
        Subsignal("cke", Pins("K32"), IOStandard("SSTL135_I")),
        Subsignal("odt", Pins("J29"), IOStandard("SSTL135_I")),
        Subsignal("reset_n", Pins("L32"), IOStandard("SSTL135_I")),
        Misc("SLEWRATE=FAST"),
    ),
    # RGMII Ethernet
    ("eth_clocks", 0,
        Subsignal("tx", Pins("A15")),
        Subsignal("rx", Pins("C17")),
        Subsignal("ref", Pins("A17")),
        IOStandard("LVCMOS33")
    ),
    ("eth", 0,
        Subsignal("rst_n", Pins("D16")),
        Subsignal("int_n", Pins("E16")),
        Subsignal("mdio", Pins("F17")),
        Subsignal("mdc", Pins("B17")),
        Subsignal("rx_ctl", Pins("A16")),
        Subsignal("rx_data", Pins("C16 B16 B14 F16")),
        Subsignal("tx_ctl", Pins("D15")),
        Subsignal("tx_data", Pins("A14 F15 C15 C14")),
        IOStandard("LVCMOS33")
    ),
    # I2C Clock Generator
    ("clkgen", 0,
        Subsignal("sda", Pins("C22")),
        Subsignal("scl", Pins("A22")),
        Subsignal("sd_oe", Pins("A2")),
        IOStandard("LVCMOS33")
    ),
    # PCIe
    ("pcie_x2", 0,
        Subsignal("clk_p", Pins("AM14")),
        Subsignal("clk_n", Pins("AM15")),
        Subsignal("rx_p", Pins("AM8 AK12")),
        Subsignal("rx_n", Pins("AM9 AK13")),
        Subsignal("tx_p", Pins("AK9 AM11")),
        Subsignal("tx_n", Pins("AK10 AM12")),
        Subsignal("perst", Pins("D22"), IOStandard("LVCMOS33")),
        Subsignal("wake_n", Pins("A23"), IOStandard("LVCMOS33")),
    ),
    # M2
    ("m2", 0,
        Subsignal("clk_p", Pins("AM23")),
        Subsignal("clk_n", Pins("AM24")),
        Subsignal("rx_p", Pins("AM17 AK21")),
        Subsignal("rx_n", Pins("AM18 AK22")),
        Subsignal("tx_p", Pins("AK18 AM20")),
        Subsignal("tx_n", Pins("AK19 AM21")),
        Subsignal("clksel", Pins("N3"), IOStandard("LVCMOS33")),
        Subsignal("sdio_clk", Pins("L4"), IOStandard("LVCMOS33")),
        Subsignal("sdio_cmd", Pins("K4"), IOStandard("LVCMOS33")),
        Subsignal("sdio_dq", Pins("L7 N4 L6 N6"), IOStandard("LVCMOS33")),
        Subsignal("uart_tx", Pins("P6"), IOStandard("LVCMOS33")),
        Subsignal("uart_rx", Pins("K5"), IOStandard("LVCMOS33")),
        Subsignal("uart_rts_n", Pins("N7"), IOStandard("LVCMOS33")),
        Subsignal("uart_cts_n", Pins("P7"), IOStandard("LVCMOS33"))
    ),
    # SDCard
    ("spisdcard", 0,
        Subsignal("clk", Pins("AK3")),
        Subsignal("mosi", Pins("AH3"), Misc("PULLMODE=UP")),
        Subsignal("cs_n", Pins("AK1"), Misc("PULLMODE=UP")),
        Subsignal("miso", Pins("AG1"), Misc("PULLMODE=UP")),
        Misc("SLEWRATE=FAST"),
        IOStandard("LVCMOS33"),
    ),
    ("sdcard", 0,
        Subsignal("clk", Pins("AK3")),
        Subsignal("cmd", Pins("AH3"), Misc("PULLMODE=UP")),
        Subsignal("data", Pins("AG1 AJ1 AH1 AK1"), Misc("PULLMODE=UP")),
        Misc("SLEWRATE=FAST"),
        IOStandard("LVCMOS33"),
    ),
    # SPIFlash
    ("spiflash", 0,
        Subsignal("clk", Pins("AM3")),
        Subsignal("cs_n", Pins("AJ3")),
        Subsignal("mosi", Pins("AK2")),
        Subsignal("miso", Pins("AJ2")),
        Subsignal("wp", Pins("AM2")),
        Subsignal("hold", Pins("AL1")),
        IOStandard("LVCMOS33")
    ),
    ("spiflash4x", 0,
        Subsignal("clk", Pins("AM3")),
        Subsignal("cs_n", Pins("AJ3")),
        Subsignal("dq", Pins("AK2 AJ2 AM2 AL1")),
        IOStandard("LVCMOS33")
    ),
    # USB ULPI
    ("ulpi", 0,
        Subsignal("clk", Pins("A18")),
        Subsignal("stp", Pins("D18")),
        Subsignal("dir", Pins("C18")),
        Subsignal("nxt", Pins("F18")),
        Subsignal("reset", Pins("D17")),
        Subsignal("data", Pins("C20 C19 E19 D20 A20 B19 D19 A19")),
        IOStandard("LVCMOS33")
    ),
    # HDMI
    ("hdmi", 0,
        Subsignal("r", Pins("F10 F9 D9 D8 C7 F8 E8 D11")),
        Subsignal("g", Pins("B8 A7 C8 C9 F11 E11 E10 D10")),
        Subsignal("b", Pins("C11 A11 B11 A10 B10 C10 A8 B7")),
        Subsignal("de", Pins("F14")),
        Subsignal("clk", Pins("A9")),
        Subsignal("vsync_n", Pins("E14")),
        Subsignal("hsync_n", Pins("F13")),
        Subsignal("sda", Pins("D13")),
        Subsignal("scl", Pins("C13")),
        IOStandard("LVCMOS33")
    ),
]

# Connectors ---------------------------------------------------------------------------------------

_connectors = [
    ("pmoda", "F19 F20 B22 C23 D14 A13 E22 D23"),
    ("pmodb", "C25 A26 F23 F25 B25 D25 F22 F24"),
    ("pmodx", "A24 C24 D24 B23 D23 A25"),
    ("ext0", "T1 U1 AE5 AE4 AB5 AB6 Y5 W5 W2 Y1 AB7 AC6 AB3 AB4 AD3 AE3 AB1 AC1 AD1 AE1 AD6 AE6 AC7 AD7"),
    ("ext1", "P5 P4 R7 T7 R6 T6 U6 U7 R4 T5 T4 U5 U4 V4 V6 V7 P2 P3 R3 T3 N1 P1 U2 U3"),
    ("ext2", "K6 K7 J7 J6 H6 H5 F4 F5 F3 E3 C4 C3 C5 D5 D3 D2 H2 H3 J3 K3 B1 C2 F1 H1")
]
# PMODS --------------------------------------------------------------------------------------------

def raw_pmod_io(pmod):
    """Expose all 8 pins of *pmod* as one raw LVCMOS33 resource."""
    pin_names = " ".join(f"{pmod}:{index:d}" for index in range(8))
    return [(pmod, 0, Pins(pin_names), IOStandard("LVCMOS33"))]
def sdcard_pmod_io(pmod):
    """Return SPI-mode and native SD-Card resources wired to *pmod*."""
    return [
        # SDCard PMOD:
        # - https://store.digilentinc.com/pmod-microsd-microsd-card-slot/
        # - https://github.com/antmicro/arty-expansion-board
        ("spisdcard", 0,
            Subsignal("clk", Pins(f"{pmod}:3")),
            Subsignal("mosi", Pins(f"{pmod}:1"), Misc("PULLMODE=UP")),
            Subsignal("cs_n", Pins(f"{pmod}:0"), Misc("PULLMODE=UP")),
            Subsignal("miso", Pins(f"{pmod}:2"), Misc("PULLMODE=UP")),
            Misc("SLEWRATE=FAST"),
            IOStandard("LVCMOS33"),
        ),
        ("sdcard", 0,
            Subsignal("data", Pins(f"{pmod}:2 {pmod}:4 {pmod}:5 {pmod}:0"), Misc("PULLMODE=UP")),
            Subsignal("cmd", Pins(f"{pmod}:1"), Misc("PULLMODE=UP")),
            Subsignal("clk", Pins(f"{pmod}:3")),
            Subsignal("cd", Pins(f"{pmod}:6")),
            Misc("SLEWRATE=FAST"),
            IOStandard("LVCMOS33"),
        ),
    ]

_sdcard_pmod_io = sdcard_pmod_io("pmoda") # SDCARD PMOD on PMODA.
# Platform -----------------------------------------------------------------------------------------

class Platform(LatticePlatform):
    default_clk_name   = "clk12"
    default_clk_period = 1e9/12e6

    def __init__(self, toolchain="trellis", **kwargs):
        """Create the board platform for the LFE5UM5G-85F (BG756 package)."""
        LatticePlatform.__init__(self, "LFE5UM5G-85F-8BG756C", _io, _connectors, toolchain=toolchain, **kwargs)

    def create_programmer(self):
        """Return an OpenOCD JTAG programmer configured for this board."""
        return OpenOCDJTAGProgrammer("openocd_trellisboard.cfg")

    def do_finalize(self, fragment):
        LatticePlatform.do_finalize(self, fragment)
        # Timing constraints for the board clocks (loose: skipped if unused).
        for clk_name, clk_freq in [
            ("clk100",        100e6),
            ("clk12",          12e6),
            ("eth_clocks:rx", 125e6),
        ]:
            self.add_period_constraint(self.lookup_request(clk_name, loose=True), 1e9/clk_freq)
from django.apps.registry import Apps
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.db.migrations.exceptions import InvalidBasesError
from django.db.migrations.operations import (
AddField, AlterField, DeleteModel, RemoveField,
)
from django.db.migrations.state import (
ModelState, ProjectState, get_related_models_recursive,
)
from django.test import SimpleTestCase, override_settings
from django.test.utils import isolate_apps
from django.utils import six
from .models import (
FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager,
UnicodeModel,
)
class StateTests(SimpleTestCase):
"""
Tests state construction, rendering and modification by operations.
"""
def test_create(self):
    """
    Tests making a ProjectState from an Apps
    """
    # Register all test models on an isolated app registry.
    new_apps = Apps(["migrations"])

    class Author(models.Model):
        name = models.CharField(max_length=255)
        bio = models.TextField()
        age = models.IntegerField(blank=True, null=True)

        class Meta:
            app_label = "migrations"
            apps = new_apps
            unique_together = ["name", "bio"]
            index_together = ["bio", "age"]

    class AuthorProxy(Author):
        class Meta:
            app_label = "migrations"
            apps = new_apps
            proxy = True
            ordering = ["name"]

    class SubAuthor(Author):
        width = models.FloatField(null=True)

        class Meta:
            app_label = "migrations"
            apps = new_apps

    class Book(models.Model):
        title = models.CharField(max_length=1000)
        author = models.ForeignKey(Author, models.CASCADE)
        contributors = models.ManyToManyField(Author)

        class Meta:
            app_label = "migrations"
            apps = new_apps
            verbose_name = "tome"
            db_table = "test_tome"
            indexes = [models.Index(fields=['title'])]

    class Food(models.Model):
        food_mgr = FoodManager('a', 'b')
        food_qs = FoodQuerySet.as_manager()
        food_no_mgr = NoMigrationFoodManager('x', 'y')

        class Meta:
            app_label = "migrations"
            apps = new_apps

    class FoodNoManagers(models.Model):
        class Meta:
            app_label = "migrations"
            apps = new_apps

    class FoodNoDefaultManager(models.Model):
        food_no_mgr = NoMigrationFoodManager('x', 'y')
        food_mgr = FoodManager('a', 'b')
        food_qs = FoodQuerySet.as_manager()

        class Meta:
            app_label = "migrations"
            apps = new_apps

    mgr1 = FoodManager('a', 'b')
    mgr2 = FoodManager('x', 'y', c=3, d=4)

    class FoodOrderedManagers(models.Model):
        # The managers on this model should be ordered by their creation
        # counter and not by the order in model body
        food_no_mgr = NoMigrationFoodManager('x', 'y')
        food_mgr2 = mgr2
        food_mgr1 = mgr1

        class Meta:
            app_label = "migrations"
            apps = new_apps

    project_state = ProjectState.from_apps(new_apps)
    author_state = project_state.models['migrations', 'author']
    author_proxy_state = project_state.models['migrations', 'authorproxy']
    sub_author_state = project_state.models['migrations', 'subauthor']
    book_state = project_state.models['migrations', 'book']
    food_state = project_state.models['migrations', 'food']
    food_no_managers_state = project_state.models['migrations', 'foodnomanagers']
    food_no_default_manager_state = project_state.models['migrations', 'foodnodefaultmanager']
    food_order_manager_state = project_state.models['migrations', 'foodorderedmanagers']

    # The expected auto-generated name for Book's title index.
    book_index = models.Index(fields=['title'])
    book_index.set_name_with_model(Book)

    self.assertEqual(author_state.app_label, "migrations")
    self.assertEqual(author_state.name, "Author")
    self.assertEqual([x for x, y in author_state.fields], ["id", "name", "bio", "age"])
    self.assertEqual(author_state.fields[1][1].max_length, 255)
    self.assertIs(author_state.fields[2][1].null, False)
    self.assertIs(author_state.fields[3][1].null, True)
    self.assertEqual(
        author_state.options,
        {"unique_together": {("name", "bio")}, "index_together": {("bio", "age")}, "indexes": []}
    )
    self.assertEqual(author_state.bases, (models.Model, ))
    self.assertEqual(book_state.app_label, "migrations")
    self.assertEqual(book_state.name, "Book")
    self.assertEqual([x for x, y in book_state.fields], ["id", "title", "author", "contributors"])
    self.assertEqual(book_state.fields[1][1].max_length, 1000)
    self.assertIs(book_state.fields[2][1].null, False)
    self.assertEqual(book_state.fields[3][1].__class__.__name__, "ManyToManyField")
    self.assertEqual(
        book_state.options,
        {"verbose_name": "tome", "db_table": "test_tome", "indexes": [book_index]},
    )
    self.assertEqual(book_state.bases, (models.Model, ))
    self.assertEqual(author_proxy_state.app_label, "migrations")
    self.assertEqual(author_proxy_state.name, "AuthorProxy")
    self.assertEqual(author_proxy_state.fields, [])
    self.assertEqual(author_proxy_state.options, {"proxy": True, "ordering": ["name"], "indexes": []})
    self.assertEqual(author_proxy_state.bases, ("migrations.author", ))
    self.assertEqual(sub_author_state.app_label, "migrations")
    self.assertEqual(sub_author_state.name, "SubAuthor")
    self.assertEqual(len(sub_author_state.fields), 2)
    self.assertEqual(sub_author_state.bases, ("migrations.author", ))
    # The default manager is used in migrations
    self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
    self.assertTrue(all(isinstance(name, six.text_type) for name, mgr in food_state.managers))
    self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
    # No explicit managers defined. Migrations will fall back to the default
    self.assertEqual(food_no_managers_state.managers, [])
    # food_mgr is used in migration but isn't the default mgr, hence add the
    # default
    self.assertEqual([name for name, mgr in food_no_default_manager_state.managers],
                     ['food_no_mgr', 'food_mgr'])
    self.assertTrue(all(isinstance(name, six.text_type) for name, mgr in food_no_default_manager_state.managers))
    self.assertEqual(food_no_default_manager_state.managers[0][1].__class__, models.Manager)
    self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager)
    self.assertEqual([name for name, mgr in food_order_manager_state.managers],
                     ['food_mgr1', 'food_mgr2'])
    self.assertTrue(all(isinstance(name, six.text_type) for name, mgr in food_order_manager_state.managers))
    self.assertEqual([mgr.args for name, mgr in food_order_manager_state.managers],
                     [('a', 'b', 1, 2), ('x', 'y', 3, 4)])
def test_custom_default_manager_added_to_the_model_state(self):
    """
    When the default manager of the model is a custom manager,
    it needs to be added to the model state.
    """
    new_apps = Apps(['migrations'])
    custom_manager = models.Manager()

    class Author(models.Model):
        # 'objects' is deliberately shadowed by a field, so 'authors'
        # becomes the default manager.
        objects = models.TextField()
        authors = custom_manager

        class Meta:
            app_label = 'migrations'
            apps = new_apps

    project_state = ProjectState.from_apps(new_apps)
    author_state = project_state.models['migrations', 'author']
    self.assertEqual(author_state.managers, [('authors', custom_manager)])
def test_custom_default_manager_named_objects_with_false_migration_flag(self):
    """
    When a manager is added with a name of 'objects' but it does not
    have `use_in_migrations = True`, no migration should be added to the
    model state (#26643).
    """
    new_apps = Apps(['migrations'])

    class Author(models.Model):
        objects = models.Manager()

        class Meta:
            app_label = 'migrations'
            apps = new_apps

    project_state = ProjectState.from_apps(new_apps)
    author_state = project_state.models['migrations', 'author']
    # Plain default manager is not recorded in the state.
    self.assertEqual(author_state.managers, [])
def test_no_duplicate_managers(self):
    """
    When a manager is added with `use_in_migrations = True` and a parent
    model had a manager with the same name and `use_in_migrations = True`,
    the parent's manager shouldn't appear in the model state (#26881).
    """
    new_apps = Apps(['migrations'])

    class PersonManager(models.Manager):
        use_in_migrations = True

    class Person(models.Model):
        objects = PersonManager()

        class Meta:
            abstract = True

    class BossManager(PersonManager):
        use_in_migrations = True

    class Boss(Person):
        # Shadows the inherited 'objects' manager from Person.
        objects = BossManager()

        class Meta:
            app_label = 'migrations'
            apps = new_apps

    project_state = ProjectState.from_apps(new_apps)
    boss_state = project_state.models['migrations', 'boss']
    self.assertEqual(boss_state.managers, [('objects', Boss.objects)])
def test_custom_default_manager(self):
    """
    When Meta.default_manager_name is set, the model state records the
    option and keeps only the designated manager.
    """
    new_apps = Apps(['migrations'])

    class Author(models.Model):
        manager1 = models.Manager()
        manager2 = models.Manager()

        class Meta:
            app_label = 'migrations'
            apps = new_apps
            default_manager_name = 'manager2'

    project_state = ProjectState.from_apps(new_apps)
    author_state = project_state.models['migrations', 'author']
    self.assertEqual(author_state.options['default_manager_name'], 'manager2')
    # Fix: the state must hold the manager actually named by
    # default_manager_name ('manager2'), not the unrelated manager1 —
    # Manager instances compare by identity, so the old assertion against
    # Author.manager1 could not match the stored manager.
    self.assertEqual(author_state.managers, [('manager2', Author.manager2)])
def test_custom_base_manager(self):
    """Meta.base_manager_name is recorded and its manager kept in state."""
    new_apps = Apps(['migrations'])

    class Author(models.Model):
        manager1 = models.Manager()
        manager2 = models.Manager()

        class Meta:
            app_label = 'migrations'
            apps = new_apps
            base_manager_name = 'manager2'

    class Author2(models.Model):
        manager1 = models.Manager()
        manager2 = models.Manager()

        class Meta:
            app_label = 'migrations'
            apps = new_apps
            base_manager_name = 'manager1'

    project_state = ProjectState.from_apps(new_apps)

    # Both the default (manager1) and the base manager (manager2) are kept.
    author_state = project_state.models['migrations', 'author']
    self.assertEqual(author_state.options['base_manager_name'], 'manager2')
    self.assertEqual(author_state.managers, [
        ('manager1', Author.manager1),
        ('manager2', Author.manager2),
    ])

    # Default and base manager coincide (manager1): only one entry.
    author2_state = project_state.models['migrations', 'author2']
    self.assertEqual(author2_state.options['base_manager_name'], 'manager1')
    self.assertEqual(author2_state.managers, [
        ('manager1', Author2.manager1),
    ])
def test_apps_bulk_update(self):
    """
    StateApps.bulk_update() should update apps.ready to False and reset
    the value afterwards.
    """
    project_state = ProjectState()
    apps = project_state.apps
    with apps.bulk_update():
        self.assertFalse(apps.ready)
    self.assertTrue(apps.ready)
    # ready must be restored even when the context body raises.
    with self.assertRaises(ValueError):
        with apps.bulk_update():
            self.assertFalse(apps.ready)
            raise ValueError()
    self.assertTrue(apps.ready)
def test_render(self):
    """
    Tests rendering a ProjectState into an Apps.
    """
    project_state = ProjectState()
    project_state.add_model(ModelState(
        app_label="migrations",
        name="Tag",
        fields=[
            ("id", models.AutoField(primary_key=True)),
            ("name", models.CharField(max_length=100)),
            ("hidden", models.BooleanField()),
        ],
    ))
    # MTI child of Tag via an explicit parent-link OneToOneField.
    project_state.add_model(ModelState(
        app_label="migrations",
        name="SubTag",
        fields=[
            ('tag_ptr', models.OneToOneField(
                'migrations.Tag',
                models.CASCADE,
                auto_created=True,
                primary_key=True,
                to_field='id',
                serialize=False,
            )),
            ("awesome", models.BooleanField()),
        ],
        bases=("migrations.Tag",),
    ))
    base_mgr = models.Manager()
    mgr1 = FoodManager('a', 'b')
    mgr2 = FoodManager('x', 'y', c=3, d=4)
    project_state.add_model(ModelState(
        app_label="migrations",
        name="Food",
        fields=[
            ("id", models.AutoField(primary_key=True)),
        ],
        managers=[
            # The ordering we really want is objects, mgr1, mgr2
            ('default', base_mgr),
            ('food_mgr2', mgr2),
            # Bytes name on purpose: rendering must coerce it to text.
            (b'food_mgr1', mgr1),
        ]
    ))
    new_apps = project_state.apps
    self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100)
    self.assertIs(new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False)
    self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2)
    Food = new_apps.get_model("migrations", "Food")
    self.assertEqual([mgr.name for mgr in Food._meta.managers],
                     ['default', 'food_mgr1', 'food_mgr2'])
    self.assertTrue(all(isinstance(mgr.name, six.text_type) for mgr in Food._meta.managers))
    self.assertEqual([mgr.__class__ for mgr in Food._meta.managers],
                     [models.Manager, FoodManager, FoodManager])
    def test_render_model_inheritance(self):
        """
        A child ModelState can only be rendered after its concrete parent is
        present in the target app registry; otherwise InvalidBasesError.
        """
        class Book(models.Model):
            title = models.CharField(max_length=1000)

            class Meta:
                app_label = "migrations"
                apps = Apps()

        class Novel(Book):
            class Meta:
                app_label = "migrations"
                apps = Apps()

        # First, test rendering individually
        apps = Apps(["migrations"])
        # We shouldn't be able to render yet
        ms = ModelState.from_model(Novel)
        with self.assertRaises(InvalidBasesError):
            ms.render(apps)
        # Once the parent model is in the app registry, it should be fine
        ModelState.from_model(Book).render(apps)
        ModelState.from_model(Novel).render(apps)
    def test_render_model_with_multiple_inheritance(self):
        """
        Rendering with multiple concrete bases requires all bases to be in
        the registry first; from_model() records bases as lazy strings.
        """
        class Foo(models.Model):
            class Meta:
                app_label = "migrations"
                apps = Apps()

        class Bar(models.Model):
            class Meta:
                app_label = "migrations"
                apps = Apps()

        class FooBar(Foo, Bar):
            class Meta:
                app_label = "migrations"
                apps = Apps()

        class AbstractSubFooBar(FooBar):
            class Meta:
                abstract = True
                apps = Apps()

        class SubFooBar(AbstractSubFooBar):
            class Meta:
                app_label = "migrations"
                apps = Apps()

        apps = Apps(["migrations"])
        # We shouldn't be able to render yet
        ms = ModelState.from_model(FooBar)
        with self.assertRaises(InvalidBasesError):
            ms.render(apps)
        # Once the parent models are in the app registry, it should be fine
        ModelState.from_model(Foo).render(apps)
        self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model])
        ModelState.from_model(Bar).render(apps)
        self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model])
        ModelState.from_model(FooBar).render(apps)
        self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar'])
        # The abstract intermediate base is skipped; the concrete one remains.
        ModelState.from_model(SubFooBar).render(apps)
        self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar'])
    def test_render_project_dependencies(self):
        """
        The ProjectState render method correctly renders models
        to account for inter-model base dependencies.
        """
        new_apps = Apps()

        class A(models.Model):
            class Meta:
                app_label = "migrations"
                apps = new_apps

        class B(A):
            class Meta:
                app_label = "migrations"
                apps = new_apps

        class C(B):
            class Meta:
                app_label = "migrations"
                apps = new_apps

        class D(A):
            class Meta:
                app_label = "migrations"
                apps = new_apps

        class E(B):
            class Meta:
                app_label = "migrations"
                apps = new_apps
                proxy = True

        class F(D):
            class Meta:
                app_label = "migrations"
                apps = new_apps
                proxy = True

        # Make a ProjectState and render it
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
        project_state.add_model(ModelState.from_model(B))
        project_state.add_model(ModelState.from_model(C))
        project_state.add_model(ModelState.from_model(D))
        project_state.add_model(ModelState.from_model(E))
        project_state.add_model(ModelState.from_model(F))
        final_apps = project_state.apps
        self.assertEqual(len(final_apps.get_models()), 6)
        # Now make an invalid ProjectState and make sure it fails
        # (F depends on D, which is deliberately omitted).
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
        project_state.add_model(ModelState.from_model(B))
        project_state.add_model(ModelState.from_model(C))
        project_state.add_model(ModelState.from_model(F))
        with self.assertRaises(InvalidBasesError):
            project_state.apps
    def test_render_unique_app_labels(self):
        """
        The ProjectState render method doesn't raise an
        ImproperlyConfigured exception about unique labels if two dotted app
        names have the same last part.
        """
        class A(models.Model):
            class Meta:
                app_label = "django.contrib.auth"

        class B(models.Model):
            class Meta:
                # Same trailing segment ("auth") as the app label above.
                app_label = "vendor.auth"

        # Make a ProjectState and render it
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
        project_state.add_model(ModelState.from_model(B))
        self.assertEqual(len(project_state.apps.get_models()), 2)
    def test_add_relations(self):
        """
        #24573 - Adding relations to existing models should reload the
        referenced models too.
        """
        new_apps = Apps()

        class A(models.Model):
            class Meta:
                app_label = 'something'
                apps = new_apps

        class B(A):
            class Meta:
                app_label = 'something'
                apps = new_apps

        class C(models.Model):
            class Meta:
                app_label = 'something'
                apps = new_apps

        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
        project_state.add_model(ModelState.from_model(B))
        project_state.add_model(ModelState.from_model(C))
        project_state.apps  # We need to work with rendered models
        old_state = project_state.clone()
        model_a_old = old_state.apps.get_model('something', 'A')
        model_b_old = old_state.apps.get_model('something', 'B')
        model_c_old = old_state.apps.get_model('something', 'C')
        # The relations between the old models are correct
        self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old)
        self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old)
        # Add a C -> A relation; A (and by inheritance B) must be re-rendered.
        operation = AddField('c', 'to_a', models.OneToOneField(
            'something.A',
            models.CASCADE,
            related_name='from_c',
        ))
        operation.state_forwards('something', project_state)
        model_a_new = project_state.apps.get_model('something', 'A')
        model_b_new = project_state.apps.get_model('something', 'B')
        model_c_new = project_state.apps.get_model('something', 'C')
        # All models have changed
        self.assertIsNot(model_a_old, model_a_new)
        self.assertIsNot(model_b_old, model_b_new)
        self.assertIsNot(model_c_old, model_c_new)
        # The relations between the old models still hold
        self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old)
        self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old)
        # The relations between the new models correct
        self.assertIs(model_a_new._meta.get_field('b').related_model, model_b_new)
        self.assertIs(model_b_new._meta.get_field('a_ptr').related_model, model_a_new)
        self.assertIs(model_a_new._meta.get_field('from_c').related_model, model_c_new)
        self.assertIs(model_c_new._meta.get_field('to_a').related_model, model_a_new)
    def test_remove_relations(self):
        """
        #24225 - Relations between models are updated while
        remaining the relations and references for models of an old state.
        """
        new_apps = Apps()

        class A(models.Model):
            class Meta:
                app_label = "something"
                apps = new_apps

        class B(models.Model):
            to_a = models.ForeignKey(A, models.CASCADE)

            class Meta:
                app_label = "something"
                apps = new_apps

        def get_model_a(state):
            # Fetch the rendered 'a' model from the given state's registry.
            return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]

        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
        project_state.add_model(ModelState.from_model(B))
        self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
        old_state = project_state.clone()
        # Removing the FK must clear A's reverse relation in the new state only.
        operation = RemoveField("b", "to_a")
        operation.state_forwards("something", project_state)
        # Model from old_state still has the relation
        model_a_old = get_model_a(old_state)
        model_a_new = get_model_a(project_state)
        self.assertIsNot(model_a_old, model_a_new)
        self.assertEqual(len(model_a_old._meta.related_objects), 1)
        self.assertEqual(len(model_a_new._meta.related_objects), 0)
        # Same test for deleted model
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
        project_state.add_model(ModelState.from_model(B))
        old_state = project_state.clone()
        operation = DeleteModel("b")
        operation.state_forwards("something", project_state)
        model_a_old = get_model_a(old_state)
        model_a_new = get_model_a(project_state)
        self.assertIsNot(model_a_old, model_a_new)
        self.assertEqual(len(model_a_old._meta.related_objects), 1)
        self.assertEqual(len(model_a_new._meta.related_objects), 0)
    def test_self_relation(self):
        """
        #24513 - Modifying an object pointing to itself would cause it to be
        rendered twice and thus breaking its related M2M through objects.
        """
        class A(models.Model):
            to_a = models.ManyToManyField('something.A', symmetrical=False)

            class Meta:
                app_label = "something"

        def get_model_a(state):
            # Fetch the rendered 'a' model from the given state's registry.
            return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]

        project_state = ProjectState()
        project_state.add_model((ModelState.from_model(A)))
        self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
        old_state = project_state.clone()
        operation = AlterField(
            model_name="a",
            name="to_a",
            field=models.ManyToManyField("something.A", symmetrical=False, blank=True)
        )
        # At this point the model would be rendered twice causing its related
        # M2M through objects to point to an old copy and thus breaking their
        # attribute lookup.
        operation.state_forwards("something", project_state)
        model_a_old = get_model_a(old_state)
        model_a_new = get_model_a(project_state)
        self.assertIsNot(model_a_old, model_a_new)
        # The old model's _meta is still consistent
        field_to_a_old = model_a_old._meta.get_field("to_a")
        self.assertEqual(field_to_a_old.m2m_field_name(), "from_a")
        self.assertEqual(field_to_a_old.m2m_reverse_field_name(), "to_a")
        self.assertIs(field_to_a_old.related_model, model_a_old)
        self.assertIs(field_to_a_old.remote_field.through._meta.get_field('to_a').related_model, model_a_old)
        self.assertIs(field_to_a_old.remote_field.through._meta.get_field('from_a').related_model, model_a_old)
        # The new model's _meta is still consistent
        field_to_a_new = model_a_new._meta.get_field("to_a")
        self.assertEqual(field_to_a_new.m2m_field_name(), "from_a")
        self.assertEqual(field_to_a_new.m2m_reverse_field_name(), "to_a")
        self.assertIs(field_to_a_new.related_model, model_a_new)
        self.assertIs(field_to_a_new.remote_field.through._meta.get_field('to_a').related_model, model_a_new)
        self.assertIs(field_to_a_new.remote_field.through._meta.get_field('from_a').related_model, model_a_new)
    def test_equality(self):
        """
        == and != are implemented correctly.
        """
        # Test two things that should be equal
        project_state = ProjectState()
        project_state.add_model(ModelState(
            "migrations",
            "Tag",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
                ("hidden", models.BooleanField()),
            ],
            {},
            None,
        ))
        project_state.apps  # Fill the apps cached property
        other_state = project_state.clone()
        self.assertEqual(project_state, project_state)
        self.assertEqual(project_state, other_state)
        self.assertIs(project_state != project_state, False)
        self.assertIs(project_state != other_state, False)
        # Equality is by state content; the rendered registries differ.
        self.assertNotEqual(project_state.apps, other_state.apps)
        # Make a very small change (max_len 99) and see if that affects it
        project_state = ProjectState()
        project_state.add_model(ModelState(
            "migrations",
            "Tag",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=99)),
                ("hidden", models.BooleanField()),
            ],
            {},
            None,
        ))
        self.assertNotEqual(project_state, other_state)
        self.assertIs(project_state == other_state, False)
    def test_dangling_references_throw_error(self):
        """
        Rendering a state with unresolved FK/M2M string references raises a
        ValueError listing every dangling lazy reference.
        """
        new_apps = Apps()

        class Author(models.Model):
            name = models.TextField()

            class Meta:
                app_label = "migrations"
                apps = new_apps

        class Publisher(models.Model):
            name = models.TextField()

            class Meta:
                app_label = "migrations"
                apps = new_apps

        class Book(models.Model):
            author = models.ForeignKey(Author, models.CASCADE)
            publisher = models.ForeignKey(Publisher, models.CASCADE)

            class Meta:
                app_label = "migrations"
                apps = new_apps

        class Magazine(models.Model):
            authors = models.ManyToManyField(Author)

            class Meta:
                app_label = "migrations"
                apps = new_apps

        # Make a valid ProjectState and render it
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(Author))
        project_state.add_model(ModelState.from_model(Publisher))
        project_state.add_model(ModelState.from_model(Book))
        project_state.add_model(ModelState.from_model(Magazine))
        self.assertEqual(len(project_state.apps.get_models()), 4)
        # now make an invalid one with a ForeignKey
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(Book))
        msg = (
            "The field migrations.Book.author was declared with a lazy reference "
            "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
            "The field migrations.Book.publisher was declared with a lazy reference "
            "to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'."
        )
        with self.assertRaisesMessage(ValueError, msg):
            project_state.apps
        # And another with ManyToManyField.
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(Magazine))
        # The M2M is reported twice: once for the field, once for the
        # auto-created through model's FK.
        msg = (
            "The field migrations.Magazine.authors was declared with a lazy reference "
            "to 'migrations.author\', but app 'migrations' doesn't provide model 'author'.\n"
            "The field migrations.Magazine_authors.author was declared with a lazy reference "
            "to \'migrations.author\', but app 'migrations' doesn't provide model 'author'."
        )
        with self.assertRaisesMessage(ValueError, msg):
            project_state.apps
        # And now with multiple models and multiple fields.
        project_state.add_model(ModelState.from_model(Book))
        msg = (
            "The field migrations.Book.author was declared with a lazy reference "
            "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
            "The field migrations.Book.publisher was declared with a lazy reference "
            "to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'.\n"
            "The field migrations.Magazine.authors was declared with a lazy reference "
            "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
            "The field migrations.Magazine_authors.author was declared with a lazy reference "
            "to 'migrations.author', but app 'migrations' doesn't provide model 'author'."
        )
        with self.assertRaisesMessage(ValueError, msg):
            project_state.apps
    def test_real_apps(self):
        """
        Including real apps can resolve dangling FK errors.
        This test relies on the fact that contenttypes is always loaded.
        """
        new_apps = Apps()

        class TestModel(models.Model):
            ct = models.ForeignKey("contenttypes.ContentType", models.CASCADE)

            class Meta:
                app_label = "migrations"
                apps = new_apps

        # If we just stick it into an empty state it should fail
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(TestModel))
        with self.assertRaises(ValueError):
            project_state.apps
        # If we include the real app it should succeed
        project_state = ProjectState(real_apps=["contenttypes"])
        project_state.add_model(ModelState.from_model(TestModel))
        rendered_state = project_state.apps
        self.assertEqual(
            len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]),
            1,
        )
    def test_ignore_order_wrt(self):
        """
        Makes sure ProjectState doesn't include OrderWrt fields when
        making from existing models.
        """
        new_apps = Apps()

        class Author(models.Model):
            name = models.TextField()

            class Meta:
                app_label = "migrations"
                apps = new_apps

        class Book(models.Model):
            author = models.ForeignKey(Author, models.CASCADE)

            class Meta:
                app_label = "migrations"
                apps = new_apps
                order_with_respect_to = "author"

        # Make a valid ProjectState and render it
        project_state = ProjectState()
        project_state.add_model(ModelState.from_model(Author))
        project_state.add_model(ModelState.from_model(Book))
        # The implicit "_order" field must not appear in the state's fields.
        self.assertEqual(
            [name for name, field in project_state.models["migrations", "book"].fields],
            ["id", "author"],
        )
    def test_manager_refer_correct_model_version(self):
        """
        #24147 - Managers refer to the correct version of a
        historical model
        """
        project_state = ProjectState()
        project_state.add_model(ModelState(
            app_label="migrations",
            name="Tag",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("hidden", models.BooleanField()),
            ],
            managers=[
                ('food_mgr', FoodManager('a', 'b')),
                ('food_qs', FoodQuerySet.as_manager()),
            ]
        ))
        old_model = project_state.apps.get_model('migrations', 'tag')
        new_state = project_state.clone()
        operation = RemoveField("tag", "hidden")
        operation.state_forwards("migrations", new_state)
        new_model = new_state.apps.get_model('migrations', 'tag')
        self.assertIsNot(old_model, new_model)
        # Each rendered model version must get its own manager instances
        # bound to that version, not shared with the other version.
        self.assertIs(old_model, old_model.food_mgr.model)
        self.assertIs(old_model, old_model.food_qs.model)
        self.assertIs(new_model, new_model.food_mgr.model)
        self.assertIs(new_model, new_model.food_qs.model)
        self.assertIsNot(old_model.food_mgr, new_model.food_mgr)
        self.assertIsNot(old_model.food_qs, new_model.food_qs)
        self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model)
        self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model)
    def test_choices_iterator(self):
        """
        #24483 - ProjectState.from_apps should not destructively consume
        Field.choices iterators.
        """
        new_apps = Apps(["migrations"])
        choices = [('a', 'A'), ('b', 'B')]

        class Author(models.Model):
            name = models.CharField(max_length=255)
            # Deliberately pass an iterator (not a list) as `choices`.
            choice = models.CharField(max_length=255, choices=iter(choices))

            class Meta:
                app_label = "migrations"
                apps = new_apps

        ProjectState.from_apps(new_apps)
        # The field's choices must still be fully available afterwards.
        choices_field = Author._meta.get_field('choice')
        self.assertEqual(list(choices_field.choices), choices)
class ModelStateTests(SimpleTestCase):
    """Tests for ModelState construction, validation, and rendering."""

    def test_custom_model_base(self):
        # A model with a custom metaclass base still records models.Model
        # as its state base.
        state = ModelState.from_model(ModelWithCustomBase)
        self.assertEqual(state.bases, (models.Model,))

    def test_bound_field_sanity_check(self):
        # Fields already bound to a model class are rejected.
        field = models.CharField(max_length=1)
        field.model = models.Model
        with self.assertRaisesMessage(ValueError, 'ModelState.fields cannot be bound to a model - "field" is.'):
            ModelState('app', 'Model', [('field', field)])

    def test_sanity_check_to(self):
        # FK targets must be string references, not model classes.
        field = models.ForeignKey(UnicodeModel, models.CASCADE)
        with self.assertRaisesMessage(
            ValueError,
            'ModelState.fields cannot refer to a model class - "field.to" does. '
            'Use a string reference instead.'
        ):
            ModelState('app', 'Model', [('field', field)])

    def test_sanity_check_through(self):
        # M2M `through` must be a string reference, not a model class.
        field = models.ManyToManyField('UnicodeModel')
        field.remote_field.through = UnicodeModel
        with self.assertRaisesMessage(
            ValueError,
            'ModelState.fields cannot refer to a model class - "field.through" does. '
            'Use a string reference instead.'
        ):
            ModelState('app', 'Model', [('field', field)])

    def test_sanity_index_name(self):
        # Unnamed indexes are rejected when passed through options.
        field = models.IntegerField()
        options = {'indexes': [models.Index(fields=['field'])]}
        msg = "Indexes passed to ModelState require a name attribute. <Index: fields='field'> doesn't have one."
        with self.assertRaisesMessage(ValueError, msg):
            ModelState('app', 'Model', [('field', field)], options=options)

    def test_fields_immutability(self):
        """
        Rendering a model state doesn't alter its internal fields.
        """
        apps = Apps()
        field = models.CharField(max_length=1)
        state = ModelState('app', 'Model', [('name', field)])
        Model = state.render(apps)
        # The rendered model gets a clone, not the original field instance.
        self.assertNotEqual(Model._meta.get_field('name'), field)

    def test_repr(self):
        field = models.CharField(max_length=1)
        state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C'])
        self.assertEqual(repr(state), "<ModelState: 'app.Model'>")
        # The repr is also used in the InvalidBasesError message.
        project_state = ProjectState()
        project_state.add_model(state)
        with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"):
            project_state.apps

    @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
    def test_create_swappable(self):
        """
        Tests making a ProjectState from an Apps with a swappable model
        """
        new_apps = Apps(['migrations'])

        class Author(models.Model):
            name = models.CharField(max_length=255)
            bio = models.TextField()
            age = models.IntegerField(blank=True, null=True)

            class Meta:
                app_label = 'migrations'
                apps = new_apps
                swappable = 'TEST_SWAPPABLE_MODEL'

        author_state = ModelState.from_model(Author)
        self.assertEqual(author_state.app_label, 'migrations')
        self.assertEqual(author_state.name, 'Author')
        self.assertEqual([x for x, y in author_state.fields], ['id', 'name', 'bio', 'age'])
        self.assertEqual(author_state.fields[1][1].max_length, 255)
        self.assertIs(author_state.fields[2][1].null, False)
        self.assertIs(author_state.fields[3][1].null, True)
        self.assertEqual(author_state.options, {'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': []})
        self.assertEqual(author_state.bases, (models.Model, ))
        self.assertEqual(author_state.managers, [])

    @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
    def test_custom_manager_swappable(self):
        """
        Tests making a ProjectState from unused models with custom managers
        """
        new_apps = Apps(['migrations'])

        class Food(models.Model):
            food_mgr = FoodManager('a', 'b')
            food_qs = FoodQuerySet.as_manager()
            food_no_mgr = NoMigrationFoodManager('x', 'y')

            class Meta:
                app_label = "migrations"
                apps = new_apps
                swappable = 'TEST_SWAPPABLE_MODEL'

        food_state = ModelState.from_model(Food)
        # The default manager is used in migrations
        self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
        self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))

    @isolate_apps('migrations', 'django.contrib.contenttypes')
    def test_order_with_respect_to_private_field(self):
        class PrivateFieldModel(models.Model):
            content_type = models.ForeignKey('contenttypes.ContentType', models.CASCADE)
            object_id = models.PositiveIntegerField()
            private = GenericForeignKey()

            class Meta:
                order_with_respect_to = 'private'

        state = ModelState.from_model(PrivateFieldModel)
        # order_with_respect_to pointing at a private (generic) field is
        # dropped from the captured options.
        self.assertNotIn('order_with_respect_to', state.options)

    @isolate_apps('migrations')
    def test_abstract_model_children_inherit_indexes(self):
        class Abstract(models.Model):
            name = models.CharField(max_length=50)

            class Meta:
                app_label = 'migrations'
                abstract = True
                indexes = [models.indexes.Index(fields=['name'])]

        class Child1(Abstract):
            pass

        class Child2(Abstract):
            pass

        child1_state = ModelState.from_model(Child1)
        child2_state = ModelState.from_model(Child2)
        # Each child gets its own uniquely-named copy of the inherited index.
        index_names = [index.name for index in child1_state.options['indexes']]
        self.assertEqual(index_names, ['migrations__name_b0afd7_idx'])
        index_names = [index.name for index in child2_state.options['indexes']]
        self.assertEqual(index_names, ['migrations__name_016466_idx'])
        # Modifying the state doesn't modify the index on the model.
        child1_state.options['indexes'][0].name = 'bar'
        self.assertEqual(Child1._meta.indexes[0].name, 'migrations__name_b0afd7_idx')

    @isolate_apps('migrations')
    def test_explicit_index_name(self):
        class TestModel(models.Model):
            name = models.CharField(max_length=50)

            class Meta:
                app_label = 'migrations'
                indexes = [models.indexes.Index(fields=['name'], name='foo_idx')]

        model_state = ModelState.from_model(TestModel)
        # Explicit index names are preserved verbatim.
        index_names = [index.name for index in model_state.options['indexes']]
        self.assertEqual(index_names, ['foo_idx'])
class RelatedModelsTests(SimpleTestCase):
    """
    Tests for get_related_models_recursive(), which walks FK/M2M/inheritance
    links in both directions to find every model transitively related to a
    given one.
    """

    def setUp(self):
        self.apps = Apps(['migrations.related_models_app'])

    def create_model(self, name, foreign_keys=None, bases=(), abstract=False, proxy=False):
        """
        Dynamically build and register a model class called `name` inside the
        isolated test app registry.

        `foreign_keys` is a sequence of relation field instances (FK/M2M);
        they are attached as attributes named '<name>_1', '<name>_2', ...
        in order. The default was changed from a mutable `[]` to None to
        avoid the mutable-default-argument pitfall; behavior is unchanged.
        """
        test_name = 'related_models_app'
        assert not (abstract and proxy)
        meta_contents = {
            'abstract': abstract,
            'app_label': test_name,
            'apps': self.apps,
            'proxy': proxy,
        }
        # str("Meta") keeps the class name a native str on both Py2 and Py3.
        meta = type(str("Meta"), tuple(), meta_contents)
        if not bases:
            bases = (models.Model,)
        body = {
            'Meta': meta,
            '__module__': "__fake__",
        }
        for i, fk in enumerate(foreign_keys or (), 1):
            body['%s_%d' % (name.lower(), i)] = fk
        return type(name, bases, body)

    def assertRelated(self, model, needle):
        """Assert `model`'s recursive related set is exactly the models in `needle`."""
        self.assertEqual(
            get_related_models_recursive(model),
            {(n._meta.app_label, n._meta.model_name) for n in needle},
        )

    def test_unrelated(self):
        A = self.create_model("A")
        B = self.create_model("B")
        self.assertRelated(A, [])
        self.assertRelated(B, [])

    def test_direct_fk(self):
        A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
        B = self.create_model("B")
        self.assertRelated(A, [B])
        self.assertRelated(B, [A])

    def test_direct_hidden_fk(self):
        # related_name='+' hides the reverse accessor but not the relation.
        A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE, related_name='+')])
        B = self.create_model("B")
        self.assertRelated(A, [B])
        self.assertRelated(B, [A])

    def test_fk_through_proxy(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,), proxy=True)
        C = self.create_model("C", bases=(B,), proxy=True)
        D = self.create_model("D", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
        self.assertRelated(A, [B, C, D])
        self.assertRelated(B, [A, C, D])
        self.assertRelated(C, [A, B, D])
        self.assertRelated(D, [A, B, C])

    def test_nested_fk(self):
        A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
        B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
        C = self.create_model("C")
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [A, C])
        self.assertRelated(C, [A, B])

    def test_two_sided(self):
        A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
        B = self.create_model("B", foreign_keys=[models.ForeignKey('A', models.CASCADE)])
        self.assertRelated(A, [B])
        self.assertRelated(B, [A])

    def test_circle(self):
        A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
        B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
        C = self.create_model("C", foreign_keys=[models.ForeignKey('A', models.CASCADE)])
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [A, C])
        self.assertRelated(C, [A, B])

    def test_base(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,))
        self.assertRelated(A, [B])
        self.assertRelated(B, [A])

    def test_nested_base(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,))
        C = self.create_model("C", bases=(B,))
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [A, C])
        self.assertRelated(C, [A, B])

    def test_multiple_bases(self):
        A = self.create_model("A")
        B = self.create_model("B")
        C = self.create_model("C", bases=(A, B,))
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [A, C])
        self.assertRelated(C, [A, B])

    def test_multiple_nested_bases(self):
        A = self.create_model("A")
        B = self.create_model("B")
        C = self.create_model("C", bases=(A, B,))
        D = self.create_model("D")
        E = self.create_model("E", bases=(D,))
        F = self.create_model("F", bases=(C, E,))
        # Y/Z form a separate inheritance island.
        Y = self.create_model("Y")
        Z = self.create_model("Z", bases=(Y,))
        self.assertRelated(A, [B, C, D, E, F])
        self.assertRelated(B, [A, C, D, E, F])
        self.assertRelated(C, [A, B, D, E, F])
        self.assertRelated(D, [A, B, C, E, F])
        self.assertRelated(E, [A, B, C, D, F])
        self.assertRelated(F, [A, B, C, D, E])
        self.assertRelated(Y, [Z])
        self.assertRelated(Z, [Y])

    def test_base_to_base_fk(self):
        A = self.create_model("A", foreign_keys=[models.ForeignKey('Y', models.CASCADE)])
        B = self.create_model("B", bases=(A,))
        Y = self.create_model("Y")
        Z = self.create_model("Z", bases=(Y,))
        self.assertRelated(A, [B, Y, Z])
        self.assertRelated(B, [A, Y, Z])
        self.assertRelated(Y, [A, B, Z])
        self.assertRelated(Z, [A, B, Y])

    def test_base_to_subclass_fk(self):
        A = self.create_model("A", foreign_keys=[models.ForeignKey('Z', models.CASCADE)])
        B = self.create_model("B", bases=(A,))
        Y = self.create_model("Y")
        Z = self.create_model("Z", bases=(Y,))
        self.assertRelated(A, [B, Y, Z])
        self.assertRelated(B, [A, Y, Z])
        self.assertRelated(Y, [A, B, Z])
        self.assertRelated(Z, [A, B, Y])

    def test_direct_m2m(self):
        # A.a_1 is the auto-named M2M field created by create_model().
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('B')])
        B = self.create_model("B")
        self.assertRelated(A, [A.a_1.rel.through, B])
        self.assertRelated(B, [A, A.a_1.rel.through])

    def test_direct_m2m_self(self):
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('A')])
        self.assertRelated(A, [A.a_1.rel.through])

    def test_intermediate_m2m_self(self):
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('A', through='T')])
        T = self.create_model("T", foreign_keys=[
            models.ForeignKey('A', models.CASCADE),
            models.ForeignKey('A', models.CASCADE),
        ])
        self.assertRelated(A, [T])
        self.assertRelated(T, [A])

    def test_intermediate_m2m(self):
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
        B = self.create_model("B")
        T = self.create_model("T", foreign_keys=[
            models.ForeignKey('A', models.CASCADE),
            models.ForeignKey('B', models.CASCADE),
        ])
        self.assertRelated(A, [B, T])
        self.assertRelated(B, [A, T])
        self.assertRelated(T, [A, B])

    def test_intermediate_m2m_extern_fk(self):
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
        B = self.create_model("B")
        Z = self.create_model("Z")
        T = self.create_model("T", foreign_keys=[
            models.ForeignKey('A', models.CASCADE),
            models.ForeignKey('B', models.CASCADE),
            models.ForeignKey('Z', models.CASCADE),
        ])
        self.assertRelated(A, [B, T, Z])
        self.assertRelated(B, [A, T, Z])
        self.assertRelated(T, [A, B, Z])
        self.assertRelated(Z, [A, B, T])

    def test_intermediate_m2m_base(self):
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
        B = self.create_model("B")
        S = self.create_model("S")
        T = self.create_model("T", foreign_keys=[
            models.ForeignKey('A', models.CASCADE),
            models.ForeignKey('B', models.CASCADE),
        ], bases=(S,))
        self.assertRelated(A, [B, S, T])
        self.assertRelated(B, [A, S, T])
        self.assertRelated(S, [A, B, T])
        self.assertRelated(T, [A, B, S])

    def test_generic_fk(self):
        # GenericForeignKey does not contribute to the related set.
        A = self.create_model("A", foreign_keys=[
            models.ForeignKey('B', models.CASCADE),
            GenericForeignKey(),
        ])
        B = self.create_model("B", foreign_keys=[
            models.ForeignKey('C', models.CASCADE),
        ])
        self.assertRelated(A, [B])
        self.assertRelated(B, [A])

    def test_abstract_base(self):
        A = self.create_model("A", abstract=True)
        B = self.create_model("B", bases=(A,))
        self.assertRelated(A, [B])
        self.assertRelated(B, [])

    def test_nested_abstract_base(self):
        A = self.create_model("A", abstract=True)
        B = self.create_model("B", bases=(A,), abstract=True)
        C = self.create_model("C", bases=(B,))
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [C])
        self.assertRelated(C, [])

    def test_proxy_base(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,), proxy=True)
        self.assertRelated(A, [B])
        self.assertRelated(B, [])

    def test_nested_proxy_base(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,), proxy=True)
        C = self.create_model("C", bases=(B,), proxy=True)
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [C])
        self.assertRelated(C, [])

    def test_multiple_mixed_bases(self):
        A = self.create_model("A", abstract=True)
        M = self.create_model("M")
        P = self.create_model("P")
        Q = self.create_model("Q", bases=(P,), proxy=True)
        Z = self.create_model("Z", bases=(A, M, Q))
        # M has a pointer O2O field p_ptr to P
        self.assertRelated(A, [M, P, Q, Z])
        self.assertRelated(M, [P, Q, Z])
        self.assertRelated(P, [M, Q, Z])
        self.assertRelated(Q, [M, P, Z])
        self.assertRelated(Z, [M, P, Q])
| |
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from os import path, getcwd
from ..graphs import GraphGroupScatter
from ..data import Vector
from ..analysis.exc import NoDataError
from ..data import UnequalVectorLengthError
class MyTestCase(unittest.TestCase):
@property
def save_path(self):
if getcwd().split('/')[-1] == 'test':
return './images/'
elif getcwd().split('/')[-1] == 'sci_analysis':
if path.exists('./setup.py'):
return './sci_analysis/test/images/'
else:
return './test/images/'
else:
'./'
    def test_1_scatter_two_groups_default(self):
        """Group scatter graph with two groups and default options."""
        np.random.seed(987654321)  # fixed seed for reproducible graphs
        input_1_x = st.norm.rvs(size=100)
        input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
        input_2_x = st.norm.rvs(size=100)
        input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
        grp = [1] * 100 + [2] * 100
        cs_x = np.concatenate((input_1_x, input_2_x))
        cs_y = np.concatenate((input_1_y, input_2_y))
        input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
        self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
                                          save_to='{}test_group_scatter_1'.format(self.save_path)))
    def test_2_scatter_two_groups_no_fit(self):
        """Group scatter graph with two groups and the fit line disabled."""
        np.random.seed(987654321)  # fixed seed for reproducible graphs
        input_1_x = st.norm.rvs(size=100)
        input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
        input_2_x = st.norm.rvs(size=100)
        input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
        grp = [1] * 100 + [2] * 100
        cs_x = np.concatenate((input_1_x, input_2_x))
        cs_y = np.concatenate((input_1_y, input_2_y))
        input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
        self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], fit=False,
                                          save_to='{}test_group_scatter_2'.format(self.save_path)))
def test_3_scatter_two_groups_no_points(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], points=False,
save_to='{}test_group_scatter_3'.format(self.save_path)))
def test_4_scatter_two_groups_highlight_one(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2],
save_to='{}test_group_scatter_4'.format(self.save_path)))
def test_5_scatter_three_groups_highlight_two(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = [1] * 100 + [2] * 100 + [3] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2, 3],
save_to='{}test_group_scatter_5'.format(self.save_path)))
def test_6_scatter_two_groups_highlight_one_no_points(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2],
points=False, save_to='{}test_group_scatter_6'.format(self.save_path)))
def test_7_scatter_two_groups_highlight_one_no_fit(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2],
fit=False, save_to='{}test_group_scatter_7'.format(self.save_path)))
def test_8_scatter_two_groups_highlight_one_scalar_num(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=2,
save_to='{}test_group_scatter_8'.format(self.save_path)))
def test_9_scatter_two_groups_string_names_highlight_one(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=['b'],
save_to='{}test_group_scatter_9'.format(self.save_path)))
def test_10_scatter_three_groups_string_names_highlight_scalar_string(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight='bc',
save_to='{}test_group_scatter_10'.format(self.save_path)))
def test_11_scatter_three_groups_invalid_highlight_groups(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
highlight=['z', 'y', 'x'],
save_to='{}test_group_scatter_11'.format(self.save_path)))
def test_12_scatter_two_groups_no_boxplot_borders(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
boxplot_borders=False,
save_to='{}test_group_scatter_12'.format(self.save_path)))
def test_13_scatter_two_groups_title(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
title='Title Test', save_to='{}test_group_scatter_13'.format(self.save_path)))
def test_14_scatter_two_groups_labels(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], xname='Test x',
yname='Test y', save_to='{}test_group_scatter_14'.format(self.save_path)))
def test_15_scatter_three_groups_auto_named(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'],
save_to='{}test_group_scatter_15'.format(self.save_path)))
def test_16_scatter_one_group_default(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
grp = ['a'] * 100
input_array = pd.DataFrame({'a': input_1_x, 'b': input_1_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_16'.format(self.save_path)))
def test_17_scatter_three_groups_vector_input_default(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = Vector(cs_x, other=cs_y, groups=grp)
self.assertTrue(GraphGroupScatter(input_array, save_to='{}test_group_scatter_17'.format(self.save_path)))
def test_18_scatter_three_groups_vector_input_highlight_one(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = Vector(cs_x, other=cs_y, groups=grp)
self.assertTrue(GraphGroupScatter(input_array, highlight=['b'],
save_to='{}test_group_scatter_18'.format(self.save_path)))
def test_19_scatter_one_group_matplotlib_bug(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=3)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
grp = ['a'] * 3
input_array = pd.DataFrame({'a': input_1_x, 'b': input_1_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_19'.format(self.save_path)))
def test_20_scatter_two_groups_matplotlib_bug(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=4)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 4 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_20'.format(self.save_path)))
def test_21_scatter_two_groups_unequal_x_and_y_size(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x if x > 0.0]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x if x > 0.0]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
self.assertRaises(UnequalVectorLengthError, lambda: GraphGroupScatter(cs_x, cs_y, groups=grp))
def test_22_scatter_two_groups_wrong_group_size(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1, 2]
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
self.assertRaises(UnequalVectorLengthError, lambda: GraphGroupScatter(cs_x, cs_y, groups=grp))
def test_23_no_data(self):
"""Test the case where there's no data."""
self.assertRaises(NoDataError, lambda: GraphGroupScatter([], []))
def test_24_scatter_three_groups_different_sizes(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=1)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=10)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 1 + ['b'] * 10 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_24'.format(self.save_path)))
def test_25_scatter_two_groups_no_ydata(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertRaises(AttributeError, lambda: GraphGroupScatter(input_array['a'], groups=input_array['c']))
def test_26_scatter_three_groups_long_group_names(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['11111111111111111111'] * 100 + ['222222222222222222222'] * 100 + ['3333333333333333333333'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_26'.format(self.save_path)))
def test_27_scatter_two_groups_negative_corr(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [2 - (x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_27'.format(self.save_path)))
def test_28_scatter_two_groups_labels(self):
"""Test the case where labels are provided."""
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_labels_array = np.random.choice(list('ABCDE'), size=200)
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(
GraphGroupScatter(
input_array['a'],
input_array['b'],
groups=input_array['c'],
labels=input_labels_array,
highlight=['A'],
save_to='{}test_group_scatter_28'.format(self.save_path)
)
)
def test_29_scatter_two_groups_labels_and_group_highlight(self):
"""Test the case where labels and groups are highlighted."""
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_labels_array = np.random.choice(list('ABCDE'), size=220)
grp = [1] * 110 + [2] * 110
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
indicies_x = list(np.random.randint(0, 199, 20))
indicies_y = list(np.random.randint(0, 199, 20))
for i in indicies_x:
cs_x = np.insert(cs_x, i, np.nan, axis=0)
for i in indicies_y:
cs_y = np.insert(cs_y, i, np.nan, axis=0)
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(
GraphGroupScatter(
input_array['a'],
input_array['b'],
groups=input_array['c'],
labels=input_labels_array,
highlight=[1],
save_to='{}test_group_scatter_29'.format(self.save_path)
)
)
def test_30_groupscatter_dataframe(self):
"""Tests graphscater with dataframe input."""
np.random.seed(987654321)
df = pd.DataFrame(np.random.randn(100, 2), columns=list('xy'))
df['labels'] = np.random.choice(list('ABCDE'), len(df)).tolist()
df['groups'] = np.random.choice(list('XYZ'), len(df)).tolist()
self.assertTrue(
GraphGroupScatter(
df['x'],
df['y'],
groups=df['groups'],
labels=df['labels'],
highlight=['A'],
save_to='{}test_group_scatter_30'.format(self.save_path)
)
)
def test_31_groupscatter_labels_no_highlight(self):
"""Test the case where labels are given, but no highlights specified."""
np.random.seed(987654321)
df = pd.DataFrame(np.random.randn(100, 2), columns=list('xy'))
df['labels'] = np.random.choice(list('ABCDE'), len(df)).tolist()
df['groups'] = np.random.choice(list('XYZ'), len(df)).tolist()
self.assertTrue(
GraphGroupScatter(
df['x'],
df['y'],
groups=df['groups'],
labels=df['labels'],
save_to='{}test_group_scatter_31'.format(self.save_path)
)
)
def test_32_groupscatter_labels_invalid_labels_and_groups(self):
"""Test the case where all the highlights are not in groups or labels."""
np.random.seed(987654321)
df = pd.DataFrame(np.random.randn(100, 2), columns=list('xy'))
df['labels'] = np.random.choice(list('ABCDE'), len(df)).tolist()
df['groups'] = np.random.choice(list('XYZ'), len(df)).tolist()
self.assertTrue(
GraphGroupScatter(
df['x'],
df['y'],
groups=df['groups'],
labels=df['labels'],
highlight=['XX', 2, 34],
save_to='{}test_group_scatter_32'.format(self.save_path)
)
)
def test_33_groupscatter_labels_individual_highlight(self):
"""Test the case where individual points are highlighted with one overrunning."""
np.random.seed(987654321)
df = pd.DataFrame(np.random.randn(100, 2), columns=list('xy'))
df['labels'] = np.random.randint(10000, 50000, size=100)
df['groups'] = np.random.choice(list('XYZ'), len(df)).tolist()
self.assertTrue(
GraphGroupScatter(
df['x'],
df['y'],
groups=df['groups'],
labels=df['labels'],
highlight=df[df['x'] > 2]['labels'].tolist(),
save_to='{}test_group_scatter_33'.format(self.save_path)
)
)
def test_34_groupscatter_labels_individual_no_borders(self):
"""Test the case where individual points are highlighted with one overrunning and no boxplot borders."""
np.random.seed(987654321)
df = pd.DataFrame(np.random.randn(100, 2), columns=list('xy'))
df['labels'] = np.random.randint(10000, 50000, size=100)
df['groups'] = np.random.choice(list('XYZ'), len(df)).tolist()
self.assertTrue(
GraphGroupScatter(
df['x'],
df['y'],
boxplot_borders=False,
groups=df['groups'],
labels=df['labels'],
highlight=df[df['x'] > 2]['labels'].tolist(),
save_to='{}test_group_scatter_34'.format(self.save_path)
)
)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
from ..adapters.oracle import Oracle
from .._compat import integer_types, basestring
from .base import SQLDialect
from . import dialects, sqltype_for
import re
@dialects.register_for(Oracle)
class OracleDialect(SQLDialect):
    """pyDAL SQL dialect for Oracle.

    Overrides the generic SQLDialect with Oracle-specific type names,
    identifier quoting, ROWNUM-based pagination and sequence/trigger
    naming helpers.
    """

    # Oracle has no boolean literals; use always-false/always-true predicates.
    false_exp = "1=0"
    true_exp = "1=1"

    @sqltype_for("string")
    def type_string(self):
        return "VARCHAR2(%(length)s)"

    @sqltype_for("text")
    def type_text(self):
        return "CLOB"

    @sqltype_for("integer")
    def type_integer(self):
        return "INT"

    @sqltype_for("bigint")
    def type_bigint(self):
        return "NUMBER"

    @sqltype_for("double")
    def type_double(self):
        return "BINARY_DOUBLE"

    @sqltype_for("time")
    def type_time(self):
        return "TIME(8)"

    @sqltype_for("datetime")
    def type_datetime(self):
        return "DATE"

    @sqltype_for("id")
    def type_id(self):
        return "NUMBER PRIMARY KEY"

    @sqltype_for("reference")
    def type_reference(self):
        return (
            "NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY "
            + "(%(field_name)s) REFERENCES %(foreign_key)s ON DELETE "
            + "%(on_delete_action)s"
        )

    @sqltype_for("reference FK")
    def type_reference_fk(self):
        return (
            ", CONSTRAINT FK_%(constraint_name)s FOREIGN KEY "
            + "(%(field_name)s) REFERENCES %(foreign_key)s "
            + "ON DELETE %(on_delete_action)s"
        )

    @sqltype_for("reference TFK")
    def type_reference_tfk(self):
        return (
            " CONSTRAINT FK_%(constraint_name)s_PK FOREIGN KEY "
            + "(%(field_name)s) REFERENCES %(foreign_table)s"
            + "(%(foreign_key)s) ON DELETE %(on_delete_action)s"
        )

    def left_join(self, val, query_env={}):
        # Oracle uses the ANSI LEFT OUTER JOIN syntax.
        if not isinstance(val, basestring):
            val = self.expand(val, query_env=query_env)
        return "LEFT OUTER JOIN %s" % val

    @property
    def random(self):
        return "dbms_random.value"

    def cast(self, first, second, query_env={}):
        # CAST(... AS CLOB) is not usable here; TO_CHAR is the workaround.
        if second == "CLOB":
            return "TO_CHAR(%s)" % self.expand(first, query_env=query_env)
        return "CAST(%s)" % self._as(first, second, query_env)

    def mod(self, first, second, query_env={}):
        # Oracle has no % operator; use the MOD() function.
        return "MOD(%s,%s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def extract(self, first, what, query_env={}):
        # EXTRACT() cannot pull time components from a DATE; use TO_CHAR.
        if what == "hour":
            return "TO_CHAR(%s, 'HH24')" % self.expand(first, query_env=query_env)
        if what == "minute":
            return "TO_CHAR(%s, 'MI')" % self.expand(first, query_env=query_env)
        if what == "second":
            return "TO_CHAR(%s, 'SS')" % self.expand(first, query_env=query_env)
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first, query_env=query_env))

    def epoch(self, val, query_env={}):
        # Seconds since the Unix epoch: date difference (in days) * 86400.
        return "(%s - DATE '1970-01-01')*24*60*60" % self.expand(
            val, query_env=query_env
        )

    def quote(self, val):
        """Quote an identifier unless it is already explicitly quoted."""
        if not (val[0] == '"' and val[-1] == '"'):
            return self.quote_template % val
        return val

    def _as(self, first, second, query_env={}):
        return "%s %s" % (self.expand(first, query_env), self.quote(second))

    def alias(self, original, new):
        # Oracle does not accept the AS keyword for table aliases.
        return "%s %s" % (original, self.quote(new))

    def writing_alias(self, table):
        return self.sql_fullref(table)

    def sqlsafe(self, field):
        if field._table is None:
            raise SyntaxError("Field %s is not bound to any table" % field.name)
        return self.quote(field._table.sql_shortref) + "." + self.quote(field._rname)

    def longname(self, field):
        if field._table is None:
            raise SyntaxError("Field %s is not bound to any table" % field.name)
        return self.quote(field._table._tablename) + "." + self.quote(field.name)

    def sql_fullref(self, table):
        if table._tablename == table._dalname:
            return self.quote(table._rname)
        return self.adapter.sqlsafe_table(table._tablename, table._rname)

    def trigger_name(self, tablename):
        return "%s_trigger" % tablename

    def sequence_name(self, tablename):
        if tablename[0] == '"':
            # manually written quotes, typically in case-sensitive rname
            tablename = tablename[1:-1]
        # truncate to stay within Oracle's legacy 30-char identifier limit
        return self.quote(("%s_sequence" % tablename)[0:29])

    def constraint_name(self, table, fieldname):
        if table[0] == '"':
            # manually written quotes, typically in case-sensitive rname
            table = table[1:-1]
        constraint_name = super(OracleDialect, self).constraint_name(table, fieldname)
        if len(constraint_name) > 30:
            # shorten to stay within Oracle's identifier length limit
            constraint_name = "%s_%s__constraint" % (table[:10], fieldname[:7])
        return constraint_name

    def primary_key(self, key):
        # Raw string avoids the invalid-escape warning for "\s"; split once
        # instead of twice.
        parts = re.split(r",\s*", key)
        if len(parts) > 1:
            return "PRIMARY KEY(%s)" % ", ".join(self.quote(k) for k in parts)
        return "PRIMARY KEY(%s)" % key

    def not_null(self, default, field_type):
        # Bug fix: a duplicate definition returning "NOT NULL DEFAULT %s"
        # shadowed this one.  Oracle requires the DEFAULT clause *before*
        # NOT NULL, so the duplicate (invalid) form was removed.
        return "DEFAULT %s NOT NULL" % self.adapter.represent(default, field_type)

    def eq(self, first, second=None, query_env={}):
        # CLOB/list columns cannot be compared directly; convert via TO_CHAR.
        if (first.type == "text" or first.type[:4] == "list") and second:
            return "(TO_CHAR(%s) = %s)" % (
                self.expand(first, query_env=query_env),
                self.expand(second, first.type, query_env=query_env),
            )
        return super(OracleDialect, self).eq(first, second, query_env)

    def regexp(self, first, second, query_env={}):
        return "REGEXP_LIKE(%s, %s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, "string", query_env=query_env),
        )

    def insert(self, table, fields, values):
        return "INSERT INTO %s(%s) VALUES (%s);" % (self.quote(table), fields, values)

    def insert_empty(self, table):
        return "INSERT INTO %s VALUES (DEFAULT);" % table

    def _select_aux(self, sql, fields, attributes, colnames):
        # Bug fix: this read `super._select_aux(...)`, which references the
        # built-in `super` type itself and raises a TypeError at runtime.
        return super(OracleDialect, self)._select_aux(
            sql, fields, attributes, colnames
        )

    def select(
        self,
        fields,
        tables,
        where=None,
        groupby=None,
        having=None,
        orderby=None,
        limitby=None,
        distinct=False,
        for_update=False,
    ):
        """Build a SELECT statement.

        ``limitby`` is implemented with nested ROWNUM subqueries because
        Oracle (pre-12c) has no LIMIT/OFFSET clause.
        """
        dst, whr, grp, order, limit, offset, upd = "", "", "", "", "", "", ""
        if distinct is True:
            dst = " DISTINCT"
        elif distinct:
            dst = " DISTINCT ON (%s)" % distinct
        if where:
            whr = " %s" % self.where(where)
        if groupby:
            grp = " GROUP BY %s" % groupby
            if having:
                grp += " HAVING %s" % having
        if orderby:
            order = " ORDER BY %s" % orderby
        if limitby:
            # Dead-code fix: the original also computed a `whr2` variable
            # here that was never used; the row window is applied by the
            # outer w_row predicate instead.
            (lmin, lmax) = limitby
            return """
            SELECT%s * FROM (
                SELECT w_tmp.*, ROWNUM w_row FROM (
                    SELECT %s FROM %s%s%s%s
                ) w_tmp
            ) WHERE w_row<=%i and w_row>%i
            """ % (
                dst,
                fields,
                tables,
                whr,
                grp,
                order,
                lmax,
                lmin,
            )
        if for_update:
            upd = " FOR UPDATE"
        return "SELECT%s %s FROM %s%s%s%s%s%s%s;" % (
            dst,
            fields,
            tables,
            whr,
            grp,
            order,
            limit,
            offset,
            upd,
        )

    def drop_table(self, table, mode):
        """Return the DDL statements that drop *table* (and its id sequence)."""
        sequence_name = table._sequence_name
        if mode and mode.upper() == "CASCADE":
            mode = "CASCADE CONSTRAINTS"
        drops = [
            "DROP TABLE %s %s;" % (self.quote(table._rname), mode),
        ]
        if "_id" in table:
            # id columns are backed by a sequence; drop it as well
            drops.append("DROP SEQUENCE %s;" % sequence_name)
        return drops
| |
"""
Decorator module by Michele Simionato <michelesimionato@libero.it>
Copyright Michele Simionato, distributed under the terms of the BSD License (see below).
http://www.phyast.pitt.edu/~micheles/python/documentation.html
Included in NLTK for its support of a nice memoization decorator.
"""
__docformat__ = 'restructuredtext en'
## The basic trick is to generate the source code for the decorated function
## with the right signature and to evaluate it.
## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator
## to understand what is going on.
__all__ = ["decorator", "new_wrapper", "getinfo"]
import sys
# Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
# the Python standard library.
old_sys_path = sys.path[:]
sys.path = [p for p in sys.path if "nltk" not in p]
import inspect
sys.path = old_sys_path
# Python < 2.4 compatibility: the built-in `set` type did not exist yet, so
# fall back to the (long-deprecated) `sets` module.
try:
    set
except NameError:
    from sets import Set as set
def getinfo(func):
    """
    Returns an info dictionary containing:
    - name (the name of the function : str)
    - argnames (the names of the arguments : list)
    - defaults (the values of the default arguments : tuple)
    - signature (the signature : str)
    - doc (the docstring : str)
    - module (the module name : str)
    - dict (the function __dict__ : str)
    >>> def f(self, x=1, y=2, *args, **kw): pass
    >>> info = getinfo(f)
    >>> info["name"]
    'f'
    >>> info["argnames"]
    ['self', 'x', 'y', 'args', 'kw']
    >>> info["defaults"]
    (1, 2)
    >>> info["signature"]
    'self, x, y, *args, **kw'
    """
    assert inspect.ismethod(func) or inspect.isfunction(func)
    # NOTE(review): Python 2-era introspection. inspect.getargspec and the
    # func_defaults/func_globals/func_closure attributes used below were
    # removed in modern Python 3; this module assumes a Python 2 interpreter.
    regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
    # argnames lists positional args followed by *varargs and **varkwargs.
    argnames = list(regargs)
    if varargs:
        argnames.append(varargs)
    if varkwargs:
        argnames.append(varkwargs)
    # formatargspec with an empty formatvalue yields "(a, b, *args, **kw)";
    # the [1:-1] slice strips the surrounding parentheses.
    signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
                                      formatvalue=lambda value: "")[1:-1]
    return dict(name=func.__name__, argnames=argnames, signature=signature,
                defaults = func.func_defaults, doc=func.__doc__,
                module=func.__module__, dict=func.__dict__,
                globals=func.func_globals, closure=func.func_closure)
# akin to functools.update_wrapper
def update_wrapper(wrapper, model, infodict=None):
    """Copy name, docstring, module, __dict__ and defaults from *model*
    (or from a precomputed *infodict*, see ``getinfo``) onto *wrapper*.

    The original callable is stored on ``wrapper.undecorated``.
    Returns *wrapper*.
    """
    infodict = infodict or getinfo(model)
    try:
        wrapper.__name__ = infodict['name']
    # Bug fix: this was a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit; only the expected failure modes of
    # assigning __name__ on old interpreters are caught now.
    except (AttributeError, TypeError):  # Python version < 2.4
        pass
    wrapper.__doc__ = infodict['doc']
    wrapper.__module__ = infodict['module']
    wrapper.__dict__.update(infodict['dict'])
    wrapper.func_defaults = infodict['defaults']
    wrapper.undecorated = model
    return wrapper
def new_wrapper(wrapper, model):
    """
    An improvement over functools.update_wrapper: instead of mutating
    *wrapper*, generate a fresh function with the right signature that
    delegates to it, and update that copy.

    Moreover, *model* can be either a function or a dictionary with the
    keys 'name', 'doc', 'module', 'dict', 'defaults' (plus 'signature'
    and 'argnames').
    """
    # Accept either a ready-made info dictionary or a function to inspect.
    infodict = model if isinstance(model, dict) else getinfo(model)
    assert '_wrapper_' not in infodict["argnames"], (
        '"_wrapper_" is a reserved argument name!')
    # Build a pass-through lambda carrying the model's exact signature.
    source = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict
    funcopy = eval(source, dict(_wrapper_=wrapper))
    return update_wrapper(funcopy, model, infodict)
# helper used in decorator_factory; installed on the class as its __call__
def __call__(self, func):
    """Decorate *func* by delegating to the instance's ``.call`` method."""
    def delegate(*args, **kwargs):
        return self.call(func, *args, **kwargs)
    return new_wrapper(delegate, func)
def decorator_factory(cls):
    """
    Take a class with a ``.call`` method and return a callable decorator
    object. It works by adding a suitable __call__ method to the class;
    it raises a TypeError if the class already has a nontrivial __call__
    method.
    """
    # NOTE(review): dir() on a new-style class always includes '__call__'
    # (inherited from type), so this guard only behaves as intended for
    # old-style Python 2 classes -- confirm before reusing on Python 3.
    attrs = set(dir(cls))
    if '__call__' in attrs:
        raise TypeError('You cannot decorate a class with a nontrivial '
                        '__call__ method')
    if 'call' not in attrs:
        raise TypeError('You cannot decorate a class without a '
                        '.call method')
    # install the module-level __call__ helper defined above
    cls.__call__ = __call__
    return cls
def decorator(caller):
    """
    General purpose decorator factory: takes a caller function as
    input and returns a decorator with the same attributes.
    A caller function is any function like this::
        def caller(func, *args, **kw):
            # do something
            return func(*args, **kw)
    Here is an example of usage:
    >>> @decorator
    ... def chatty(f, *args, **kw):
    ...     print "Calling %r" % f.__name__
    ...     return f(*args, **kw)
    >>> chatty.__name__
    'chatty'
    >>> @chatty
    ... def f(): pass
    ...
    >>> f()
    Calling 'f'
    decorator can also take in input a class with a .call method; in this
    case it converts the class into a factory of callable decorator objects.
    See the documentation for an example.
    """
    # Classes are handled by decorator_factory instead of signature cloning.
    if inspect.isclass(caller):
        return decorator_factory(caller)
    def _decorator(func): # the real meat is here
        infodict = getinfo(func)
        argnames = infodict['argnames']
        assert not ('_call_' in argnames or '_func_' in argnames), (
            'You cannot use _call_ or _func_ as argument names!')
        # Build a lambda with func's exact signature that delegates every
        # call to caller(func, ...); this preserves introspection.
        src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict
        # import sys; print >> sys.stderr, src # for debugging purposes
        dec_func = eval(src, dict(_func_=func, _call_=caller))
        return update_wrapper(dec_func, func, infodict)
    # The decorator itself gets caller's name/doc so it introspects nicely.
    return update_wrapper(_decorator, caller)
def getattr_(obj, name, default_thunk):
    """Attribute analogue of dict.setdefault: return ``obj.name`` if it
    exists, otherwise install ``default_thunk()`` on obj and return it."""
    if hasattr(obj, name):
        return getattr(obj, name)
    fresh = default_thunk()
    setattr(obj, name, fresh)
    return fresh
@decorator
def memoize(func, *args):
    # Positional-args-keyed cache; the dict is attached to func lazily on
    # the first call (kwargs are not supported by this memoizer).
    cache = getattr_(func, "memoize_dic", dict)
    if args not in cache:
        cache[args] = func(*args)
    return cache[args]
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest; doctest.testmod()
########################## LEGALESE ###############################
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## Redistributions in bytecode form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
## DAMAGE.
| |
"""
I/O for Gmsh's msh format (version 4.1, as used by Gmsh 4.2.2+), cf.
<http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format>.
"""
from functools import partial
import numpy as np
from .._common import cell_data_from_raw, num_nodes_per_cell, raw_from_cell_data, warn
from .._exceptions import ReadError, WriteError
from .._mesh import CellBlock, Mesh
from .common import (
_fast_forward_over_blank_lines,
_fast_forward_to_end_block,
_gmsh_to_meshio_order,
_gmsh_to_meshio_type,
_meshio_to_gmsh_order,
_meshio_to_gmsh_type,
_read_data,
_read_physical_names,
_write_data,
_write_physical_names,
)
# Numpy dtypes mirroring the C types used by the binary MSH format:
# platform int, pointer-sized unsigned ("P" == uintp), and float64.
c_int = np.dtype("i")
c_size_t = np.dtype("P")
c_double = np.dtype("d")
def _size_type(data_size):
return np.dtype(f"u{data_size}")
def read_buffer(f, is_ascii: bool, data_size):
    """Read an MSH 4.1 mesh from the open file handle *f* and return a Mesh.

    *is_ascii* selects text vs. binary parsing; *data_size* is the size_t
    width announced in the $MeshFormat header.
    """
    # The format is specified at
    # <http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format>.

    # Initialize the optional data fields
    points = []
    cells = None
    field_data = {}
    cell_data_raw = {}
    cell_tags = {}
    point_data = {}
    physical_tags = None
    bounding_entities = None
    cell_sets = {}
    periodic = None
    while True:
        # fast-forward over blank lines
        line, is_eof = _fast_forward_over_blank_lines(f)
        if is_eof:
            break
        if line[0] != "$":
            raise ReadError(f"Unexpected line {repr(line)}")
        environ = line[1:].strip()

        if environ == "PhysicalNames":
            _read_physical_names(f, field_data)
        elif environ == "Entities":
            # Read physical tags and information on bounding entities.
            # The information is passed to the processing of elements.
            physical_tags, bounding_entities = _read_entities(f, is_ascii, data_size)
        elif environ == "Nodes":
            # NOTE(review): point_tags/point_entities are only bound here; a
            # file lacking $Nodes (or with $Elements first) raises NameError
            # below/in _read_elements — confirm that is acceptable behavior
            # for malformed input.
            points, point_tags, point_entities = _read_nodes(f, is_ascii, data_size)
        elif environ == "Elements":
            cells, cell_tags, cell_sets = _read_elements(
                f,
                point_tags,
                physical_tags,
                bounding_entities,
                is_ascii,
                data_size,
                field_data,
            )
        elif environ == "Periodic":
            periodic = _read_periodic(f, is_ascii, data_size)
        elif environ == "NodeData":
            _read_data(f, "NodeData", point_data, data_size, is_ascii)
        elif environ == "ElementData":
            _read_data(f, "ElementData", cell_data_raw, data_size, is_ascii)
        else:
            # From
            # <http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format>:
            # ```
            # Any section with an unrecognized header is simply ignored: you can thus
            # add comments in a .msh file by putting them e.g. inside a
            # $Comments/$EndComments section.
            # ```
            # skip environment
            _fast_forward_to_end_block(f, environ)

    if cells is None:
        raise ReadError("$Element section not found.")

    cell_data = cell_data_from_raw(cells, cell_data_raw)
    cell_data.update(cell_tags)

    # Add node entity information to the point data
    point_data.update({"gmsh:dim_tags": point_entities})

    return Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
        field_data=field_data,
        cell_sets=cell_sets,
        gmsh_periodic=periodic,
    )
def _read_entities(f, is_ascii: bool, data_size):
    # Read the entity section. Return physical tags of the entities, and (for
    # entities of dimension > 0) the bounding entities (so points that form
    # the boundary of a line etc).
    # Note that the bounding box of the entities is disregarded. Adding this
    # is not difficult, but for the moment, the entropy of adding more data
    # does not seem warranted.
    #
    # Both return values are 4-tuples of dicts (one dict per dimension
    # 0..3), mapping entity tag -> list/array of tags.
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    # Local c_size_t deliberately shadows the module-level dtype: the file
    # header dictates the actual size_t width.
    c_size_t = _size_type(data_size)
    physical_tags = ({}, {}, {}, {})
    bounding_entities = ({}, {}, {}, {})
    number = fromfile(f, c_size_t, 4)  # dims 0, 1, 2, 3
    for d, n in enumerate(number):
        for _ in range(n):
            (tag,) = fromfile(f, c_int, 1)
            # Points carry 3 bounding-box values, higher dims carry 6.
            fromfile(f, c_double, 3 if d == 0 else 6)  # discard bounding-box
            (num_physicals,) = fromfile(f, c_size_t, 1)
            physical_tags[d][tag] = list(fromfile(f, c_int, num_physicals))
            if d > 0:
                # Number of bounding entities
                num_BREP_ = fromfile(f, c_size_t, 1)[0]
                # Store bounding entities
                bounding_entities[d][tag] = fromfile(f, c_int, num_BREP_)
    _fast_forward_to_end_block(f, "Entities")
    return physical_tags, bounding_entities
def _read_nodes(f, is_ascii: bool, data_size):
    # Read node data: Node coordinates and tags.
    # Also find the entities of the nodes, and store this as point_data.
    # Note that entity tags are 1-offset within each dimension, thus it is
    # necessary to keep track of both tag and dimension of the entity
    #
    # Returns (points, tags, dim_tags): coordinates of shape (n, 3), 0-based
    # node tags of shape (n,), and per-node (entityDim, entityTag) pairs of
    # shape (n, 2).
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)

    # numEntityBlocks numNodes minNodeTag maxNodeTag (all size_t)
    num_entity_blocks, total_num_nodes, _, _ = fromfile(f, c_size_t, 4)

    points = np.empty((total_num_nodes, 3), dtype=float)
    tags = np.empty(total_num_nodes, dtype=int)
    dim_tags = np.empty((total_num_nodes, 2), dtype=int)

    # To save the entity block id for each node, initialize an array here,
    # populate it with num_nodes
    idx = 0
    for _ in range(num_entity_blocks):
        # entityDim(int) entityTag(int) parametric(int) numNodes(size_t)
        dim, entity_tag, parametric = fromfile(f, c_int, 3)
        if parametric != 0:
            raise ReadError("parametric nodes not implemented")
        num_nodes = int(fromfile(f, c_size_t, 1)[0])

        # From <http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format>:
        # > [...] tags can be "sparse", i.e., do not have to constitute a continuous
        # > list of numbers (the format even allows them to not be ordered).
        #
        # Following https://github.com/nschloe/meshio/issues/388, we read the tags and
        # populate the points array accordingly, thereby preserving the order of indices
        # of nodes/points.
        ixx = slice(idx, idx + num_nodes)
        tags[ixx] = fromfile(f, c_size_t, num_nodes) - 1

        # Store the point densely and in the order in which they appear in the file.
        # x(double) y(double) z(double) (* numNodes)
        points[ixx] = fromfile(f, c_double, num_nodes * 3).reshape((num_nodes, 3))

        # Entity tag and entity dimension of the nodes. Stored as point-data.
        dim_tags[ixx, 0] = dim
        dim_tags[ixx, 1] = entity_tag
        idx += num_nodes

    _fast_forward_to_end_block(f, "Nodes")
    return points, tags, dim_tags
def _read_elements(
    f, point_tags, physical_tags, bounding_entities, is_ascii, data_size, field_data
):
    # Read the $Elements block. Returns (cells, cell_data, cell_sets):
    # a list of CellBlock, tag arrays per block, and per-physical-name index
    # sets (plus bounding entities, stored under "gmsh:bounding_entities").
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)

    # numEntityBlocks numElements minElementTag maxElementTag (all size_t)
    num_entity_blocks, _, _, _ = fromfile(f, c_size_t, 4)

    data = []
    cell_data = {}
    # One (possibly empty) index range per entity block and physical name.
    cell_sets = {k: [None] * num_entity_blocks for k in field_data.keys()}

    for k in range(num_entity_blocks):
        # entityDim(int) entityTag(int) elementType(int) numElements(size_t)
        dim, tag, type_ele = fromfile(f, c_int, 3)
        (num_ele,) = fromfile(f, c_size_t, 1)
        for physical_name, cell_set in cell_sets.items():
            # Full range when this block belongs to the named physical
            # group, empty range otherwise.
            cell_set[k] = np.arange(
                num_ele
                if (
                    physical_tags
                    and field_data[physical_name][1] == dim
                    and field_data[physical_name][0] in physical_tags[dim][tag]
                )
                else 0,
                dtype=type(num_ele),
            )
        tpe = _gmsh_to_meshio_type[type_ele]
        num_nodes_per_ele = num_nodes_per_cell[tpe]
        d = fromfile(f, c_size_t, int(num_ele * (1 + num_nodes_per_ele))).reshape(
            (num_ele, -1)
        )
        # Find physical tag, if defined; else it is None.
        pt = None if not physical_tags else physical_tags[dim][tag]
        # Bounding entities (of lower dimension) if defined. Else it is None.
        if dim > 0 and bounding_entities:  # Points have no boundaries
            be = bounding_entities[dim][tag]
        else:
            be = None
        data.append((pt, be, tag, tpe, d))

    _fast_forward_to_end_block(f, "Elements")

    # Inverse point tags
    inv_tags = np.full(np.max(point_tags) + 1, -1, dtype=int)
    inv_tags[point_tags] = np.arange(len(point_tags))

    # Note that the first column in the data array is the element tag; discard it.
    data = [
        (physical_tag, bound_entity, geom_tag, tpe, inv_tags[d[:, 1:] - 1])
        for physical_tag, bound_entity, geom_tag, tpe, d in data
    ]

    cells = []
    for physical_tag, bound_entity, geom_tag, key, values in data:
        cells.append(CellBlock(key, _gmsh_to_meshio_order(key, values)))
        if physical_tag:
            if "gmsh:physical" not in cell_data:
                cell_data["gmsh:physical"] = []
            cell_data["gmsh:physical"].append(
                np.full(len(values), physical_tag[0], int)
            )
        if "gmsh:geometrical" not in cell_data:
            cell_data["gmsh:geometrical"] = []
        cell_data["gmsh:geometrical"].append(np.full(len(values), geom_tag, int))

        # The bounding entities is stored in the cell_sets.
        if bounding_entities:
            if "gmsh:bounding_entities" not in cell_sets:
                cell_sets["gmsh:bounding_entities"] = []
            cell_sets["gmsh:bounding_entities"].append(bound_entity)

    return cells, cell_data, cell_sets
def _read_periodic(f, is_ascii, data_size):
    # Read the $Periodic block. Returns a list of
    # [entityDim, (slaveTag, masterTag), affine, slave_master] entries,
    # where slave_master is an (n, 2) array of 0-based node index pairs.
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)
    periodic = []
    # numPeriodicLinks(size_t)
    num_periodic = int(fromfile(f, c_size_t, 1)[0])
    for _ in range(num_periodic):
        # entityDim(int) entityTag(int) entityTagMaster(int)
        edim, stag, mtag = fromfile(f, c_int, 3)
        # numAffine(size_t) value(double) ...
        num_affine = int(fromfile(f, c_size_t, 1)[0])
        affine = fromfile(f, c_double, num_affine)
        # numCorrespondingNodes(size_t)
        num_nodes = int(fromfile(f, c_size_t, 1)[0])
        # nodeTag(size_t) nodeTagMaster(size_t) ...
        slave_master = fromfile(f, c_size_t, num_nodes * 2).reshape(-1, 2)
        slave_master = slave_master - 1  # Subtract one, Python is 0-based
        periodic.append([edim, (stag, mtag), affine, slave_master])
    _fast_forward_to_end_block(f, "Periodic")
    return periodic
def write(filename, mesh, float_fmt=".16e", binary=True):
    """Writes msh files, cf.
    <http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format>.

    Args:
        filename: output path; the file is always opened in binary mode.
        mesh: the mesh object to serialize.
        float_fmt: printf-style float precision used in ASCII mode.
        binary: write binary MSH 4.1 when True, ASCII when False.
    """
    # Filter the point data: gmsh:dim_tags are tags, the rest is actual point data.
    point_data = {}
    for key, d in mesh.point_data.items():
        if key not in ["gmsh:dim_tags"]:
            point_data[key] = d

    # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the rest is
    # actual cell data.
    tag_data = {}
    cell_data = {}
    for key, d in mesh.cell_data.items():
        if key in ["gmsh:physical", "gmsh:geometrical", "cell_tags"]:
            tag_data[key] = d
        else:
            cell_data[key] = d

    with open(filename, "wb") as fh:
        file_type = 1 if binary else 0
        # data_size is the size_t width advertised in the header; the
        # module-level c_size_t dtype is pointer-sized.
        data_size = c_size_t.itemsize
        fh.write(b"$MeshFormat\n")
        fh.write(f"4.1 {file_type} {data_size}\n".encode())
        if binary:
            # Endianness probe: the reader checks this int equals 1.
            np.array([1], dtype=c_int).tofile(fh)
            fh.write(b"\n")
        fh.write(b"$EndMeshFormat\n")

        if mesh.field_data:
            _write_physical_names(fh, mesh.field_data)

        _write_entities(
            fh, mesh.cells, tag_data, mesh.cell_sets, mesh.point_data, binary
        )
        _write_nodes(fh, mesh.points, mesh.cells, mesh.point_data, float_fmt, binary)
        _write_elements(fh, mesh.cells, tag_data, binary)
        if mesh.gmsh_periodic is not None:
            _write_periodic(fh, mesh.gmsh_periodic, float_fmt, binary)

        for name, dat in point_data.items():
            _write_data(fh, "NodeData", name, dat, binary)
        cell_data_raw = raw_from_cell_data(cell_data)
        for name, dat in cell_data_raw.items():
            _write_data(fh, "ElementData", name, dat, binary)
def _write_entities(fh, cells, tag_data, cell_sets, point_data, binary):
    """Write entity section in a .msh file.

    The entity section links up to three kinds of information:
        1) The geometric objects represented in the mesh.
        2) Physical tags of geometric objects. This data will be a subset
           of that represented in 1)
        3) Which geometric objects form the boundary of this object.
           The boundary is formed of objects with dimension 1 less than
           the current one. A boundary can only be specified for objects of
           dimension at least 1.

    The entities of all geometric objects is pulled from
    point_data['gmsh:dim_tags']. For details, see the function _write_nodes().

    Physical tags are specified as tag_data, while the boundary of a geometric
    object is specified in cell_sets.
    """
    # The data format for the entities section is
    #
    #    numPoints(size_t) numCurves(size_t)
    #      numSurfaces(size_t) numVolumes(size_t)
    #    pointTag(int) X(double) Y(double) Z(double)
    #      numPhysicalTags(size_t) physicalTag(int) ...
    #    ...
    #    curveTag(int) minX(double) minY(double) minZ(double)
    #      maxX(double) maxY(double) maxZ(double)
    #      numPhysicalTags(size_t) physicalTag(int) ...
    #      numBoundingPoints(size_t) pointTag(int) ...
    #    ...
    #    surfaceTag(int) minX(double) minY(double) minZ(double)
    #      maxX(double) maxY(double) maxZ(double)
    #      numPhysicalTags(size_t) physicalTag(int) ...
    #      numBoundingCurves(size_t) curveTag(int) ...
    #    ...
    #    volumeTag(int) minX(double) minY(double) minZ(double)
    #      maxX(double) maxY(double) maxZ(double)
    #      numPhysicalTags(size_t) physicalTag(int) ...
    #      numBoundngSurfaces(size_t) surfaceTag(int) ...

    # Both nodes and cells have entities, but the cell entities are a subset of
    # the nodes. The reason is (if the inner workings of Gmsh has been correctly
    # understood) that node entities are assigned to all
    # objects necessary to specify the geometry whereas only cells of Physical
    # objcets (gmsh jargon) are present among the cell entities.
    # The entities section must therefore be built on the node-entities, if
    # these are available. If this is not the case, we leave this section blank.
    # TODO: Should this give a warning?
    if "gmsh:dim_tags" not in point_data:
        return

    fh.write(b"$Entities\n")

    # Array of entity tag (first row) and dimension (second row) per node.
    # We need to combine the two, since entity tags are reset for each dimension.
    # Uniquify, so that each row in node_dim_tags represent a unique entity
    node_dim_tags = np.unique(point_data["gmsh:dim_tags"], axis=0)

    # Write number of entities per dimension
    num_occ = np.bincount(node_dim_tags[:, 0], minlength=4)
    if num_occ.size > 4:
        raise ValueError("Encountered entity with dimension > 3")

    if binary:
        num_occ.astype(c_size_t).tofile(fh)
    else:
        fh.write(f"{num_occ[0]} {num_occ[1]} {num_occ[2]} {num_occ[3]}\n".encode())

    # Array of dimension and entity tag per cell. Will be compared with the
    # similar not array.
    cell_dim_tags = np.empty((len(cells), 2), dtype=int)
    for ci, cell_block in enumerate(cells):
        cell_dim_tags[ci] = [
            cell_block.dim,
            tag_data["gmsh:geometrical"][ci][0],
        ]

    # We will only deal with bounding entities if this information is available
    has_bounding_elements = "gmsh:bounding_entities" in cell_sets

    # The node entities form a superset of cell entities. Write entity information
    # based on nodes, supplement with cell information when there is a matcihng
    # cell block.
    for dim, tag in node_dim_tags:
        # Find the matching cell block, if it exists
        matching_cell_block = np.where(
            np.logical_and(cell_dim_tags[:, 0] == dim, cell_dim_tags[:, 1] == tag)
        )[0]
        if matching_cell_block.size > 1:
            # It is not 100% clear if this is not permissible, but the current
            # implementation for sure does not allow it.
            raise ValueError("Encountered non-unique CellBlock dim_tag")

        # The information to be written varies according to entity dimension,
        # whether entity has a physical tag, and between ascii and binary.
        # The resulting code is a bit ugly, but no simpler and clean option
        # seems possible.

        # Entity tag
        if binary:
            np.array([tag], dtype=c_int).tofile(fh)
        else:
            fh.write(f"{tag} ".encode())

        # Min-max coordinates for the entity. For now, simply put zeros here,
        # and hope that gmsh does not complain. To expand this, the point
        # coordinates must be made available to this function; the bounding
        # box can then be found by a min-max over the points of the matching
        # cell.
        if dim == 0:
            # Bounding box is a point
            if binary:
                np.zeros(3, dtype=c_double).tofile(fh)
            else:
                fh.write(b"0 0 0 ")
        else:
            # Bounding box has six coordinates
            if binary:
                np.zeros(6, dtype=c_double).tofile(fh)
            else:
                fh.write(b"0 0 0 0 0 0 ")

        # If there is a corresponding cell block, write physical tags (if any)
        # and bounding entities (if any)
        if matching_cell_block.size > 0:
            # entity has a physical tag, write this
            # ASSUMPTION: There is a single physical tag for this
            physical_tag = tag_data["gmsh:physical"][matching_cell_block[0]][0]
            if binary:
                np.array([1], dtype=c_size_t).tofile(fh)
                np.array([physical_tag], dtype=c_int).tofile(fh)
            else:
                fh.write(f"1 {physical_tag} ".encode())
        else:
            # The number of physical tags is zero
            if binary:
                np.array([0], dtype=c_size_t).tofile(fh)
            else:
                fh.write(b"0 ")

        if dim > 0:
            # Entities not of the lowest dimension can have their
            # bounding elements (of dimension one less) specified
            if has_bounding_elements and matching_cell_block.size > 0:
                # The bounding element should be a list
                bounds = cell_sets["gmsh:bounding_entities"][matching_cell_block[0]]
                num_bounds = len(bounds)
                if num_bounds > 0:
                    if binary:
                        np.array(num_bounds, dtype=c_size_t).tofile(fh)
                        np.array(bounds, dtype=c_int).tofile(fh)
                    else:
                        fh.write(f"{num_bounds} ".encode())
                        for bi in bounds:
                            fh.write(f"{bi} ".encode())
                        fh.write(b"\n")
                else:
                    # Register that there are no bounding elements
                    if binary:
                        np.array([0], dtype=c_size_t).tofile(fh)
                    else:
                        fh.write(b"0\n")
            else:
                # Register that there are no bounding elements
                if binary:
                    np.array([0], dtype=c_size_t).tofile(fh)
                else:
                    fh.write(b"0\n")
        else:
            # If ascii, enforce line change
            if not binary:
                fh.write(b"\n")

    if binary:
        fh.write(b"\n")
    # raise NotImplementedError
    fh.write(b"$EndEntities\n")
def _write_nodes(fh, points, cells, point_data, float_fmt, binary):
    """Write node information.

    If data on dimension and tags of the geometric entities which the nodes belong to
    is available, the nodes will be grouped accordingly. This data is
    specified as point_data, using the key 'gmsh:dim_tags' and data as an
    num_points x 2 numpy array (first column is the dimension of the geometric entity
    of this node, second is the tag).

    If dim_tags are not available, all nodes will be assigned the same tag of 0. This
    only makes sense if a single cell block is present in the mesh; an error will be
    raised if len(cells) > 1.
    """
    if points.shape[1] == 2:
        # msh4 requires 3D points, but 2D points given.
        # Appending 0 third component.
        points = np.column_stack([points, np.zeros_like(points[:, 0])])

    fh.write(b"$Nodes\n")

    # The format for the nodes section is
    #
    # $Nodes
    #   numEntityBlocks(size_t) numNodes(size_t) minNodeTag(size_t) maxNodeTag(size_t)
    #   entityDim(int) entityTag(int) parametric(int; 0 or 1)
    #   numNodesInBlock(size_t)
    #     nodeTag(size_t)
    #     ...
    #     x(double) y(double) z(double)
    #        < u(double; if parametric and entityDim >= 1) >
    #        < v(double; if parametric and entityDim >= 2) >
    #        < w(double; if parametric and entityDim == 3) >
    #     ...
    #   ...
    # $EndNodes
    #
    n = points.shape[0]
    min_tag = 1
    max_tag = n
    is_parametric = 0

    # If node (entity) tag and dimension is available, we make a list of unique
    # combinations thereof, and a map from the full node set to the unique
    # set.
    if "gmsh:dim_tags" in point_data:
        # reverse_index_map maps from all nodes to their respective representation in
        # (the uniquified) node_dim_tags. This approach works for general orderings of
        # the nodes
        node_dim_tags, reverse_index_map = np.unique(
            point_data["gmsh:dim_tags"],
            axis=0,
            return_inverse=True,
        )
    else:
        # If entity information is not provided, we will assign the same entity for all
        # nodes. This only makes sense if the cells are of a single type
        if len(cells) != 1:
            raise WriteError(
                "Specify entity information (gmsh:dim_tags in point_data) "
                + "to deal with more than one cell type. "
            )

        dim = cells[0].dim
        tag = 0
        node_dim_tags = np.array([[dim, tag]])
        # All nodes map to the (single) dimension-entity object
        reverse_index_map = np.full(n, 0, dtype=int)

    num_blocks = node_dim_tags.shape[0]

    # First write preamble
    if binary:
        if points.dtype != c_double:
            warn(f"Binary Gmsh needs c_double points (got {points.dtype}). Converting.")
            points = points.astype(c_double)
        np.array([num_blocks, n, min_tag, max_tag], dtype=c_size_t).tofile(fh)
    else:
        fh.write(f"{num_blocks} {n} {min_tag} {max_tag}\n".encode())

    for j in range(num_blocks):
        dim, tag = node_dim_tags[j]
        # Nodes belonging to entity block j, in their original order.
        node_tags = np.where(reverse_index_map == j)[0]
        num_points_this = node_tags.size

        if binary:
            np.array([dim, tag, is_parametric], dtype=c_int).tofile(fh)
            np.array([num_points_this], dtype=c_size_t).tofile(fh)
            # Node tags are 1-based in the file.
            (node_tags + 1).astype(c_size_t).tofile(fh)
            points[node_tags].tofile(fh)
        else:
            fh.write(f"{dim} {tag} {is_parametric} {num_points_this}\n".encode())
            (node_tags + 1).astype(c_size_t).tofile(fh, "\n", "%d")
            fh.write(b"\n")
            np.savetxt(fh, points[node_tags], delimiter=" ", fmt="%" + float_fmt)

    if binary:
        fh.write(b"\n")
    fh.write(b"$EndNodes\n")
def _write_elements(fh, cells, tag_data, binary: bool) -> None:
    """write the $Elements block

    $Elements
      numEntityBlocks(size_t)
      numElements(size_t) minElementTag(size_t) maxElementTag(size_t)
      entityDim(int) entityTag(int) elementType(int; see below)
      numElementsInBlock(size_t)
        elementTag(size_t) nodeTag(size_t) ...
        ...
      ...
    $EndElements

    Fix: the binary branch previously converted node_idcs to c_size_t
    *silently* up front, which made the later warning-emitting conversion
    check dead code (the warn could never fire). The dtype is now converted
    exactly once, with the warning.
    """
    fh.write(b"$Elements\n")
    total_num_cells = sum(len(c) for c in cells)
    num_blocks = len(cells)
    min_element_tag = 1
    max_element_tag = total_num_cells
    if binary:
        np.array(
            [num_blocks, total_num_cells, min_element_tag, max_element_tag],
            dtype=c_size_t,
        ).tofile(fh)

        tag0 = 1
        for ci, cell_block in enumerate(cells):
            node_idcs = _meshio_to_gmsh_order(cell_block.type, cell_block.data)
            if node_idcs.dtype != c_size_t:
                warn(
                    f"Binary Gmsh cells need c_size_t (got {node_idcs.dtype}). "
                    + "Converting."
                )
                node_idcs = node_idcs.astype(c_size_t)

            # entityDim(int) entityTag(int) elementType(int)
            # numElementsBlock(size_t)
            # The entity tag should be equal within a CellBlock
            if "gmsh:geometrical" in tag_data:
                entity_tag = tag_data["gmsh:geometrical"][ci][0]
            else:
                entity_tag = 0
            cell_type = _meshio_to_gmsh_type[cell_block.type]
            np.array([cell_block.dim, entity_tag, cell_type], dtype=c_int).tofile(fh)
            n = node_idcs.shape[0]
            np.array([n], dtype=c_size_t).tofile(fh)

            np.column_stack(
                [
                    np.arange(tag0, tag0 + n, dtype=c_size_t),
                    # increment indices by one to conform with gmsh standard
                    node_idcs + 1,
                ]
            ).tofile(fh)
            tag0 += n

        fh.write(b"\n")
    else:
        fh.write(
            "{} {} {} {}\n".format(
                num_blocks, total_num_cells, min_element_tag, max_element_tag
            ).encode()
        )
        tag0 = 1
        for ci, cell_block in enumerate(cells):
            node_idcs = _meshio_to_gmsh_order(cell_block.type, cell_block.data)

            # entityDim(int) entityTag(int) elementType(int) numElementsBlock(size_t)
            # The entity tag should be equal within a CellBlock
            if "gmsh:geometrical" in tag_data:
                entity_tag = tag_data["gmsh:geometrical"][ci][0]
            else:
                entity_tag = 0
            cell_type = _meshio_to_gmsh_type[cell_block.type]
            n = len(cell_block.data)
            fh.write(f"{cell_block.dim} {entity_tag} {cell_type} {n}\n".encode())
            np.savetxt(
                fh,
                # Gmsh indexes from 1 not 0
                np.column_stack([np.arange(tag0, tag0 + n), node_idcs + 1]),
                "%d",
                " ",
            )
            tag0 += n
    fh.write(b"$EndElements\n")
def _write_periodic(fh, periodic, float_fmt: str, binary: bool) -> None:
    """write the $Periodic block

    Layout:
    $Periodic
      numPeriodicLinks(size_t)
      entityDim(int) entityTag(int) entityTagMaster(int)
      numAffine(size_t) value(double) ...
      numCorrespondingNodes(size_t)
        nodeTag(size_t) nodeTagMaster(size_t)
        ...
      ...
    $EndPeriodic
    """

    def _emit(handle, value, dtype, **savetxt_kwargs):
        # Serialize `value` as `dtype`, raw bytes in binary mode, via
        # numpy.savetxt in ASCII mode.
        arr = np.array(value, dtype=dtype)
        if binary:
            arr.tofile(handle)
        else:
            arr = np.atleast_2d(arr)
            fmt = float_fmt if dtype == c_double else "d"
            fmt = "%" + savetxt_kwargs.pop("fmt", fmt)
            np.savetxt(handle, arr, fmt=fmt, **savetxt_kwargs)

    fh.write(b"$Periodic\n")
    _emit(fh, len(periodic), c_size_t)
    for edim, (slave_tag, master_tag), affine, node_pairs in periodic:
        _emit(fh, [edim, slave_tag, master_tag], c_int)
        if affine is None or len(affine) == 0:
            _emit(fh, 0, c_size_t)
        else:
            _emit(fh, len(affine), c_size_t, newline=" ")
            _emit(fh, affine, c_double, fmt=float_fmt)
        pairs = np.array(node_pairs, dtype=c_size_t).reshape(-1, 2)
        pairs = pairs + 1  # Add one, Gmsh is 1-based
        _emit(fh, len(pairs), c_size_t)
        _emit(fh, pairs, c_size_t)
    if binary:
        fh.write(b"\n")
    fh.write(b"$EndPeriodic\n")
| |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO, SA, FR, WE, TU
from holidays.constants import JAN, MAR, APR, MAY, JUN, AUG, SEP, OCT, NOV, DEC
from holidays.constants import SAT, SUN, WEEKEND
from holidays.holiday_base import HolidayBase
class Australia(HolidayBase):
    """Australian public holidays, nationwide and per state/territory.

    Subdivisions are the eight states/territories (ACT, NSW, NT, QLD, SA,
    TAS, VIC, WA). Observed make-up days are added when ``observed`` is
    enabled on the base class.

    Fix: the QLD Royal Queensland Show section used ``if year == 2021``
    after a separate ``if year == 2020``, so in 2020 the regular August
    date was registered *in addition to* the rescheduled one via the
    ``else`` branch. The check is now ``elif``.
    """

    country = "AU"
    subdivisions = ["ACT", "NSW", "NT", "QLD", "SA", "TAS", "VIC", "WA"]

    def __init__(self, **kwargs):
        HolidayBase.__init__(self, **kwargs)

    def _populate(self, year):
        # Relevant statutes:
        # ACT:  Holidays Act 1958
        # NSW:  Public Holidays Act 2010
        # NT:   Public Holidays Act 2013
        # QLD:  Holidays Act 1983
        # SA:   Holidays Act 1910
        # TAS:  Statutory Holidays Act 2000
        # VIC:  Public Holidays Act 1993
        # WA:   Public and Bank Holidays Act 1972

        # TODO do more research on history of Aus holidays

        # New Year's Day
        name = "New Year's Day"
        jan1 = date(year, JAN, 1)
        self[jan1] = name
        if self.observed and jan1.weekday() in WEEKEND:
            self[jan1 + rd(weekday=MO)] = name + " (Observed)"

        # Australia Day
        jan26 = date(year, JAN, 26)
        if year >= 1935:
            if self.subdiv == "NSW" and year < 1946:
                name = "Anniversary Day"
            else:
                name = "Australia Day"
            self[jan26] = name
            if self.observed and year >= 1946 and jan26.weekday() in WEEKEND:
                self[jan26 + rd(weekday=MO)] = name + " (Observed)"
        elif year >= 1888 and self.subdiv != "SA":
            name = "Anniversary Day"
            self[jan26] = name

        # Adelaide Cup
        if self.subdiv == "SA":
            name = "Adelaide Cup"
            if year >= 2006:
                # subject to proclamation ?!?!
                self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name
            else:
                self[date(year, MAR, 1) + rd(weekday=MO(+3))] = name

        # Canberra Day
        # Info from https://www.timeanddate.com/holidays/australia/canberra-day
        # and https://en.wikipedia.org/wiki/Canberra_Day
        if self.subdiv == "ACT" and year >= 1913:
            name = "Canberra Day"
            if year >= 1913 and year <= 1957:
                self[date(year, MAR, 12)] = name
            elif year >= 1958 and year <= 2007:
                self[date(year, MAR, 1) + rd(weekday=MO(+3))] = name
            elif year >= 2008 and year != 2012:
                self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name
            elif year == 2012:
                self[date(year, MAR, 12)] = name

        # Easter
        self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
        if self.subdiv in ("ACT", "NSW", "NT", "QLD", "SA", "VIC"):
            self[easter(year) + rd(weekday=SA(-1))] = "Easter Saturday"
        if self.subdiv in ("ACT", "NSW", "QLD", "VIC"):
            self[easter(year)] = "Easter Sunday"
        self[easter(year) + rd(weekday=MO)] = "Easter Monday"

        # Anzac Day
        if year > 1920:
            name = "Anzac Day"
            apr25 = date(year, APR, 25)
            self[apr25] = name
            if self.observed:
                if apr25.weekday() == SAT and self.subdiv in ("WA", "NT"):
                    self[apr25 + rd(weekday=MO)] = name + " (Observed)"
                elif apr25.weekday() == SUN and self.subdiv in (
                    "ACT",
                    "QLD",
                    "SA",
                    "WA",
                    "NT",
                ):
                    self[apr25 + rd(weekday=MO)] = name + " (Observed)"

        # Western Australia Day
        if self.subdiv == "WA" and year > 1832:
            if year >= 2015:
                name = "Western Australia Day"
            else:
                name = "Foundation Day"
            self[date(year, JUN, 1) + rd(weekday=MO(+1))] = name

        # Sovereign's Birthday
        if year >= 1952:
            name = "Queen's Birthday"
        elif year > 1901:
            name = "King's Birthday"
        if year >= 1936:
            name = "Queen's Birthday"
            if self.subdiv == "QLD":
                if year == 2012:
                    self[date(year, JUN, 11)] = "Queen's Diamond Jubilee"
                if year < 2016 and year != 2012:
                    dt = date(year, JUN, 1) + rd(weekday=MO(+2))
                    self[dt] = name
                else:
                    dt = date(year, OCT, 1) + rd(weekday=MO)
                    self[dt] = name
            elif self.subdiv == "WA":
                # by proclamation ?!?!
                self[date(year, OCT, 1) + rd(weekday=MO(-1))] = name
            elif self.subdiv in ("NSW", "VIC", "ACT", "SA", "NT", "TAS"):
                dt = date(year, JUN, 1) + rd(weekday=MO(+2))
                self[dt] = name
        elif year > 1911:
            self[date(year, JUN, 3)] = name  # George V
        elif year > 1901:
            self[date(year, NOV, 9)] = name  # Edward VII

        # Picnic Day
        if self.subdiv == "NT":
            name = "Picnic Day"
            self[date(year, AUG, 1) + rd(weekday=MO)] = name

        # Bank Holiday
        if self.subdiv == "NSW":
            if year >= 1912:
                name = "Bank Holiday"
                self[date(year, AUG, 1) + rd(weekday=MO)] = name

        # Labour Day
        name = "Labour Day"
        if self.subdiv in ("NSW", "ACT", "SA"):
            self[date(year, OCT, 1) + rd(weekday=MO)] = name
        elif self.subdiv == "WA":
            self[date(year, MAR, 1) + rd(weekday=MO)] = name
        elif self.subdiv == "VIC":
            self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name
        elif self.subdiv == "QLD":
            if 2013 <= year <= 2015:
                self[date(year, OCT, 1) + rd(weekday=MO)] = name
            else:
                self[date(year, MAY, 1) + rd(weekday=MO)] = name
        elif self.subdiv == "NT":
            name = "May Day"
            self[date(year, MAY, 1) + rd(weekday=MO)] = name
        elif self.subdiv == "TAS":
            name = "Eight Hours Day"
            self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name

        # Family & Community Day
        if self.subdiv == "ACT":
            name = "Family & Community Day"
            if 2007 <= year <= 2009:
                self[date(year, NOV, 1) + rd(weekday=TU)] = name
            elif year == 2010:
                # first Monday of the September/October school holidays
                # moved to the second Monday if this falls on Labour day
                # TODO need a formula for the ACT school holidays then
                # http://www.cmd.act.gov.au/communication/holidays
                self[date(year, SEP, 26)] = name
            elif year == 2011:
                self[date(year, OCT, 10)] = name
            elif year == 2012:
                self[date(year, OCT, 8)] = name
            elif year == 2013:
                self[date(year, SEP, 30)] = name
            elif year == 2014:
                self[date(year, SEP, 29)] = name
            elif year == 2015:
                self[date(year, SEP, 28)] = name
            elif year == 2016:
                self[date(year, SEP, 26)] = name
            elif year == 2017:
                self[date(year, SEP, 25)] = name

        # Reconciliation Day
        if self.subdiv == "ACT":
            name = "Reconciliation Day"
            if year >= 2018:
                self[date(year, MAY, 27) + rd(weekday=MO)] = name

        if self.subdiv == "VIC":
            # Grand Final Day
            if year == 2020:
                # Rescheduled due to COVID-19
                self[date(year, OCT, 23)] = "Grand Final Day"
            elif year == 2021:
                # Rescheduled due to COVID-19
                self[date(year, SEP, 24)] = "Grand Final Day"
            elif year >= 2015:
                self[date(year, SEP, 24) + rd(weekday=FR)] = "Grand Final Day"

            # Melbourne Cup
            self[date(year, NOV, 1) + rd(weekday=TU)] = "Melbourne Cup"

        # The Royal Queensland Show (Ekka)
        # The Show starts on the first Friday of August - providing this is
        # not prior to the 5th - in which case it will begin on the second
        # Friday. The Wednesday during the show is a public holiday.
        if self.subdiv == "QLD":
            name = "The Royal Queensland Show"
            if year == 2020:
                # Rescheduled due to COVID-19.
                self[date(year, AUG, 14)] = name
            elif year == 2021:
                # Rescheduled due to COVID-19.
                self[date(year, OCT, 29)] = name
            else:
                self[
                    date(year, AUG, 5) + rd(weekday=FR) + rd(weekday=WE)
                ] = name

        # Christmas Day
        name = "Christmas Day"
        dec25 = date(year, DEC, 25)
        self[dec25] = name
        if self.observed and dec25.weekday() in WEEKEND:
            self[date(year, DEC, 27)] = name + " (Observed)"

        # Boxing Day
        if self.subdiv == "SA":
            name = "Proclamation Day"
        else:
            name = "Boxing Day"
        dec26 = date(year, DEC, 26)
        self[dec26] = name
        if self.observed and dec26.weekday() in WEEKEND:
            self[date(year, DEC, 28)] = name + " (Observed)"
class AU(Australia):
    """Alias of :class:`Australia` (country-code class name)."""
    pass
class AUS(Australia):
    """Alias of :class:`Australia` (three-letter country-code class name)."""
    pass
| |
#!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Compiles all *.proto files it finds into *_pb2.py."""
from __future__ import print_function
import logging
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
# Directory with this file.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))

# Minimally required protoc version (inclusive, compared as a tuple).
MIN_SUPPORTED_PROTOC_VERSION = (3, 17, 3)
# Maximally supported protoc version (inclusive, compared as a tuple).
MAX_SUPPORTED_PROTOC_VERSION = (3, 17, 3)

# Printed if protoc is missing or too old.
PROTOC_INSTALL_HELP = (
    "Could not find working protoc (%s <= ver <= %s) in PATH." %
    (
        '.'.join(map(str, MIN_SUPPORTED_PROTOC_VERSION)),
        '.'.join(map(str, MAX_SUPPORTED_PROTOC_VERSION)),
    ))

# Paths that should not be searched for *.proto.
# Matches any path containing a 'third_party' component ('/' or '\\' separated).
IGNORED_PATHS = [
    re.compile(r'.*(/|\\)third_party(/|\\)?'),
]
def is_ignored(path):
  """Checks whether |path| matches one of the IGNORED_PATHS regexps."""
  for pattern in IGNORED_PATHS:
    if pattern.match(path):
      return True
  return False
def find_proto_files(path):
  """Recursively searches for *.proto files, yields absolute paths to them.

  Hidden directories and directories matching IGNORED_PATHS are pruned from
  the walk. Symlinked directories are followed.
  """
  root = os.path.abspath(path)
  for dirpath, dirnames, filenames in os.walk(root, followlinks=True):
    # Prune hidden and ignored directories in place so os.walk skips them.
    dirnames[:] = [
        d for d in dirnames
        if not d.startswith('.') and not is_ignored(os.path.join(dirpath, d))
    ]
    # Yield *.proto files.
    for filename in filenames:
      if filename.endswith('.proto'):
        yield os.path.join(dirpath, filename)
def get_protoc():
  """Returns protoc executable path (maybe relative to PATH)."""
  if sys.platform == 'win32':
    return 'protoc.exe'
  return 'protoc'
def compile_proto(proto_file, proto_path, output_path=None):
  """Invokes 'protoc', compiling single *.proto file into *_pb2.py file.

  Args:
    proto_file: the file to compile.
    proto_path: the root of proto file directory tree.
    output_path: the root of the output directory tree.
      Defaults to `proto_path`.

  Returns:
    The path of the generated _pb2.py file.
  """
  output_path = output_path or proto_path
  # Reuse embedded google protobuf.
  root = os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR)))
  cmd = [
      get_protoc(),
      '--proto_path=%s' % proto_path,
      '--proto_path=%s' % os.path.join(root, 'client', 'third_party'),
      '--python_out=%s' % output_path,
      '--prpc-python_out=%s' % output_path,
      proto_file,
  ]
  logging.debug('Running %s', cmd)
  # Prepend THIS_DIR to PATH; presumably the prpc-python protoc plugin lives
  # there and protoc resolves plugins via PATH -- TODO confirm.
  env = os.environ.copy()
  env['PATH'] = os.pathsep.join([THIS_DIR, env.get('PATH', '')])
  subprocess.check_call(cmd, env=env)
  return proto_file.replace('.proto', '_pb2.py').replace(proto_path,
                                                         output_path)
def check_proto_compiled(proto_file, proto_path):
  """Return True if *_pb2.py on disk is up to date.

  Recompiles |proto_file| into a temporary directory and compares the fresh
  output byte-for-byte with the generated file that sits next to the proto.
  """
  expected_path = proto_file.replace('.proto', '_pb2.py')
  if not os.path.exists(expected_path):
    # Never compiled at all.
    return False

  tmp_dir = tempfile.mkdtemp()
  try:
    try:
      fresh = compile_proto(proto_file, proto_path, output_path=tmp_dir)
    except subprocess.CalledProcessError:
      # A proto that fails to compile is treated as "not up to date".
      return False
    with open(fresh, 'r') as f:
      fresh_content = f.read()
    with open(expected_path, 'r') as f:
      existing_content = f.read()
    return fresh_content == existing_content
  finally:
    shutil.rmtree(tmp_dir)
def compile_all_files(root_dir, proto_path):
  """Compiles all *.proto files it recursively finds in |root_dir|.

  Returns True iff every file compiled successfully; failures are reported
  on stderr as they happen.
  """
  root_dir = os.path.abspath(root_dir)
  failures = []
  for proto in find_proto_files(root_dir):
    try:
      compile_proto(proto, proto_path)
    except subprocess.CalledProcessError:
      print('Failed to compile: %s' % proto[len(root_dir) + 1:], file=sys.stderr)
      failures.append(proto)
  return not failures
def check_all_files(root_dir, proto_path):
  """Returns True if all *_pb2.py files on disk are up to date.

  Stale files are reported on stderr as they are found.
  """
  root_dir = os.path.abspath(root_dir)
  stale = []
  for proto in find_proto_files(root_dir):
    if check_proto_compiled(proto, proto_path):
      continue
    stale.append(proto)
    print(
        'Need to recompile file: %s' % proto[len(root_dir) + 1:],
        file=sys.stderr)
  return not stale
def get_protoc_version():
  """Returns the version of installed 'protoc' as an int tuple, or None.

  Runs `protoc --version` and parses its output. Returns None when the
  binary is missing, exits non-zero, or prints something unexpected.
  """
  cmd = [get_protoc(), '--version']
  try:
    logging.debug('Running %s', cmd)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
      logging.debug('protoc --version returned %d', proc.returncode)
      return None
  except OSError as err:
    logging.debug('Failed to run protoc --version: %s', err)
    return None
  # On Python 3 communicate() returns bytes here; re.match with a str pattern
  # against bytes raises TypeError, so decode first. On Python 2 this yields
  # unicode, which matches fine as well.
  if isinstance(out, bytes):
    out = out.decode('utf-8', 'replace')
  match = re.match('libprotoc (.*)', out)
  if not match:
    logging.debug('Unexpected output of protoc --version: %s', out)
    return None
  # int() tolerates the trailing newline captured by the greedy group.
  return tuple(map(int, match.group(1).split('.')))
def main(args, app_dir=None):
  """Command line entry point.

  Args:
    args: command line arguments (excluding the program name).
    app_dir: if given, the directory to process; otherwise a single
      positional argument supplies the root directory.

  Returns:
    Process exit code: 0 on success, 1 on failure.
  """
  parser = optparse.OptionParser(
      description=sys.modules['__main__'].__doc__,
      usage='%prog [options]' + ('' if app_dir else ' <root dir>'))
  parser.add_option(
      '-c', '--check', action='store_true',
      help='Only check that all *.proto files are up to date')
  parser.add_option('-v', '--verbose', action='store_true')
  parser.add_option(
      '--proto_path',
      help=(
          'Used to calculate relative paths of proto files in the registry. '
          'Defaults to the input directory.'
      ))
  options, args = parser.parse_args(args)
  logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)

  # Resolve the root directory: either the app_dir given by the caller or
  # the single positional argument (parser.error exits the process).
  root_dir = None
  if not app_dir:
    if len(args) != 1:
      parser.error('Expecting single argument')
    root_dir = args[0]
  else:
    if args:
      parser.error('Unexpected arguments')
    root_dir = app_dir

  # Ensure protoc compiler is up-to-date.
  protoc_version = get_protoc_version()
  if protoc_version is None or protoc_version < MIN_SUPPORTED_PROTOC_VERSION:
    if protoc_version:
      existing = '.'.join(map(str, protoc_version))
      expected = '.'.join(map(str, MIN_SUPPORTED_PROTOC_VERSION))
      print(
          'protoc version is too old (%s), expecting at least %s.\n' %
          (existing, expected),
          file=sys.stderr)
    sys.stderr.write(PROTOC_INSTALL_HELP)
    return 1

  # Make sure protoc produces code compatible with vendored libprotobuf.
  if protoc_version > MAX_SUPPORTED_PROTOC_VERSION:
    existing = '.'.join(map(str, protoc_version))
    expected = '.'.join(map(str, MAX_SUPPORTED_PROTOC_VERSION))
    print(
        'protoc version is too new (%s), expecting at most %s.\n' % (existing,
                                                                     expected),
        file=sys.stderr)
    sys.stderr.write(PROTOC_INSTALL_HELP)
    return 1

  proto_path = os.path.abspath(options.proto_path or root_dir)

  if options.check:
    success = check_all_files(root_dir, proto_path)
  else:
    success = compile_all_files(root_dir, proto_path)
  return int(not success)
# Script entry point: compile (or --check) protos under the given root dir.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_factory_ops.constant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import ragged
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConstOpTest(ragged_test_util.RaggedTensorTestCase,
                        parameterized.TestCase):
  """Tests for `ragged_factory_ops.constant` and its private helpers.

  Covers shape/dtype inference, explicit `ragged_rank`/`inner_shape`/`dtype`
  arguments, numpy-array inputs, empty inputs, and the error paths.
  """

  @parameterized.parameters(
      #=========================================================================
      # 0-dimensional tensors.
      dict(pylist=b'x', expected_shape=()),

      #=========================================================================
      # 1-dimensional tensors.
      dict(pylist=[1, 2, 3], expected_shape=(3,)),

      #=========================================================================
      # 2-dimensional tensors.
      dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
      dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),

      #=========================================================================
      # 3-dimensional tensors.
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          expected_shape=(3, None, None)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      # 3-dimensional tensors with numpy arrays
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          expected_shape=(3, None, None)),
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], np.array([3, np.array(4)])],
                  np.array([]), [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          inner_shape=(2,),
          expected_shape=(3, None, 2)),

      #=========================================================================
      # 4-dimensional tensors.
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          expected_shape=(2, None, None, None)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          ragged_rank=1,
          expected_shape=(2, None, 2, 2)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          inner_shape=(2,),
          expected_shape=(2, None, None, 2)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          inner_shape=(2, 2),
          expected_shape=(2, None, 2, 2)),
      # 4-dimensional tensors with numpy arrays
      dict(
          pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]],
                           np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]),
          expected_shape=(2, None, None, None)),

      #=========================================================================
      # Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
      dict(pylist=[], expected_shape=(0,)),
      dict(pylist=[[], [], np.array([])], expected_shape=(3, None)),
      dict(
          pylist=[[[], []], [], [[], [[]]]],
          expected_shape=(3, None, None, None)),
      dict(
          pylist=np.array([np.array([[], []]),
                           np.array([]), [[], [[]]]]),
          expected_shape=(3, None, None, None)),

      #=========================================================================
      # Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
      dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
      dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
      dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
      dict(
          pylist=[],
          ragged_rank=1,
          inner_shape=(100, 20),
          expected_shape=(0, None, 100, 20)),
      dict(
          pylist=[],
          ragged_rank=2,
          inner_shape=(100, 20),
          expected_shape=(0, None, None, 100, 20)),
      dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
      dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
      dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
      dict(
          pylist=np.array([]),
          ragged_rank=1,
          inner_shape=(100, 20),
          expected_shape=(0, None, 100, 20)),

      #=========================================================================
      # default/inferred dtypes
      dict(pylist=[], expected_dtype=dtypes.float32),
      dict(pylist=[[[], [[[]], []]]], expected_dtype=dtypes.float32),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=dtypes.int32),
      dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=dtypes.float32),
      dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=dtypes.float32),
      dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=dtypes.string),
      dict(pylist=[[True]], expected_dtype=dtypes.bool),
      dict(
          pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]],
          expected_dtype=dtypes.float32),

      #=========================================================================
      # explicit dtypes
      dict(pylist=[], dtype=dtypes.float32),
      dict(pylist=[], dtype=dtypes.string),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int64),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int32),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.float32),
      dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float16),
      dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float32),
      dict(
          pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
          dtype=dtypes.string),
  )
  def testRaggedConst(self,
                      pylist,
                      dtype=None,
                      ragged_rank=None,
                      inner_shape=None,
                      expected_shape=None,
                      expected_dtype=None):
    """Tests that `ragged_const(pylist).eval().tolist() == pylist`.

    Args:
      pylist: The `pylist` argument for `ragged_const()`.
      dtype: The `dtype` argument for `ragged_const()`. If not None, then also
        test that the resulting ragged tensor has this `dtype`.
      ragged_rank: The `ragged_rank` argument for `ragged_const()`. If not
        None, then also test that the resulting ragged tensor has this
        `ragged_rank`.
      inner_shape: The `inner_shape` argument for `ragged_const()`. If not
        None, then also test that the resulting ragged tensor has this
        `inner_shape`.
      expected_shape: The expected shape for the resulting ragged tensor.
      expected_dtype: The expected dtype for the resulting ragged tensor (used
        to test default/inferred types when dtype=None).
    """
    rt = ragged_factory_ops.constant(
        pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)

    # Normalize the pylist, i.e., convert all np.arrays to list.
    # E.g., [np.array((1,2))] --> [[1,2]]
    pylist = self._normalize_pylist(pylist)

    # If dtype was explicitly specified, check it.
    if dtype is not None:
      self.assertEqual(rt.dtype, dtype)
    if expected_dtype is not None:
      self.assertEqual(rt.dtype, expected_dtype)

    # If ragged_rank was explicitly specified, check it.
    # (A result with no ragged dimensions comes back as a plain Tensor.)
    if ragged_rank is not None:
      if isinstance(rt, ragged_tensor.RaggedTensor):
        self.assertEqual(rt.ragged_rank, ragged_rank)
      else:
        self.assertEqual(0, ragged_rank)

    # If inner_shape was explicitly specified, check it.
    if inner_shape is not None:
      if isinstance(rt, ragged_tensor.RaggedTensor):
        self.assertEqual(rt.flat_values.shape.as_list()[1:], list(inner_shape))
      else:
        self.assertEqual(rt.shape.as_list(), list(inner_shape))

    if expected_shape is not None:
      self.assertEqual(tuple(rt.shape.as_list()), expected_shape)
    self.assertRaggedEqual(rt, pylist)

  @parameterized.parameters(
      dict(
          pylist=12,
          ragged_rank=1,
          exception=ValueError,
          message='Invalid pylist=12: incompatible with ragged_rank=1'),
      dict(
          pylist=12,
          inner_shape=(1,),
          exception=ValueError,
          message='Invalid pylist=12: incompatible with '
          'dim\\(inner_shape\\)=1'),
      dict(
          pylist=[[[1], [2]]],
          ragged_rank=-1,
          exception=ValueError,
          message='Invalid ragged_rank=-1: must be nonnegative'),
      dict(
          pylist=[[1, [2]]],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
      dict(
          pylist=[[[1]], [[[2]]]],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
      dict(
          pylist=[[1], [[]]],
          exception=ValueError,
          message='Invalid pylist=.*: empty list nesting is greater '
          'than scalar value nesting'),
      dict(
          pylist=[1, 2, 3],
          ragged_rank=1,
          exception=ValueError,
          message='pylist has scalar values depth 1, but ragged_rank=1 '
          'requires scalar value depth greater than 1'),
      dict(
          pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
          ragged_rank=2,
          exception=ValueError,
          message='pylist has scalar values depth 2, but ragged_rank=2 '
          'requires scalar value depth greater than 2'),
      dict(pylist=[1, 2, 3], inner_shape=(1, 1), exception=TypeError),
      dict(
          pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
          inner_shape=(2, 2),
          ragged_rank=1,
          exception=ValueError,
          message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
          'dim\\(inner_shape\\)=2'),
      dict(
          pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
      dict(
          pylist=[[[], [[]]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
  )
  def testRaggedConstError(self,
                           pylist,
                           dtype=None,
                           ragged_rank=None,
                           inner_shape=None,
                           exception=None,
                           message=None):
    """Tests that `ragged_const()` raises an expected exception."""
    # NOTE(review): assertRaisesRegexp is the deprecated Python 2 spelling;
    # kept for compatibility with the Python 2 test runners of this era.
    self.assertRaisesRegexp(
        exception,
        message,
        ragged_factory_ops.constant,
        pylist,
        dtype=dtype,
        ragged_rank=ragged_rank,
        inner_shape=inner_shape)

  @parameterized.parameters([
      dict(pylist=9, scalar_depth=0, max_depth=0),
      dict(pylist=[9], scalar_depth=1, max_depth=1),
      dict(pylist=[1, 2, 3], scalar_depth=1, max_depth=1),
      dict(pylist=[[1], [2]], scalar_depth=2, max_depth=2),
      dict(pylist=[[[1], [2]], [[3]]], scalar_depth=3, max_depth=3),
      dict(pylist=[], scalar_depth=None, max_depth=1),
      dict(pylist=[[]], scalar_depth=None, max_depth=2),
      dict(pylist=[[], [], []], scalar_depth=None, max_depth=2),
      dict(pylist=[[[], []], [[], [[[]]]], []], scalar_depth=None, max_depth=5),
      dict(
          pylist=[1, [2]],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
      dict(
          pylist=[[1], 2],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
      dict(
          pylist=[[[[1]], []], [[2]]],
          exception=ValueError,
          message='all scalar values must have the same nesting depth'),
  ])
  def testScalarAndMaxDepthHelper(self,
                                  pylist,
                                  scalar_depth=None,
                                  max_depth=None,
                                  exception=None,
                                  message=None):
    """Tests for the _find_scalar_and_max_depth helper function."""
    if exception is not None:
      self.assertRaisesRegexp(exception, message,
                              ragged_factory_ops._find_scalar_and_max_depth,
                              pylist)
    else:
      self.assertEqual(
          ragged_factory_ops._find_scalar_and_max_depth(pylist),
          (scalar_depth, max_depth))

  @parameterized.parameters([
      dict(pylist=[[1], [2, 3]], ragged_rank=1, inner_shape=()),
      dict(
          pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=1,
          inner_shape=(1,)),
      dict(pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=2, inner_shape=()),
      dict(
          pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
          ragged_rank=1,
          inner_shape=(2, 3)),
      dict(
          pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
          ragged_rank=2,
          inner_shape=(3,)),
      dict(
          pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
          ragged_rank=3,
          inner_shape=()),
      dict(
          pylist=[[[1], [2, 3]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
      dict(
          pylist=[[[1], [[2]]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
      dict(
          pylist=[[[[1]], [2]]],
          ragged_rank=1,
          exception=ValueError,
          message='inner values have inconsistent shape'),
  ])
  def testDefaultInnerShapeForPylistHelper(self,
                                           pylist,
                                           ragged_rank,
                                           inner_shape=None,
                                           exception=None,
                                           message=None):
    """Tests for the _default_inner_shape_for_pylist helper function."""
    if exception is not None:
      self.assertRaisesRegexp(
          exception, message,
          ragged.ragged_factory_ops._default_inner_shape_for_pylist, pylist,
          ragged_rank)
    else:
      self.assertEqual(
          ragged.ragged_factory_ops._default_inner_shape_for_pylist(
              pylist, ragged_rank), inner_shape)
# Test entry point.
if __name__ == '__main__':
  googletest.main()
| |
"""
The qiprofile imaging Mongodb data model.
"""
import re
import decimal
from numbers import Number
import mongoengine
from mongoengine import (fields, signals, ValidationError)
from .common import (Encounter, Outcome, TumorExtent)
class Point(mongoengine.EmbeddedDocument):
    """The 3D point in the volume voxel space."""

    # The coordinates are integer voxel indexes (IntField), not physical
    # (e.g. millimeter) positions.
    x = fields.IntField()
    """
    The x dimension value in the image coordinate system.
    """

    y = fields.IntField()
    """
    The y dimension value in the image coordinate system.
    """

    z = fields.IntField()
    """
    The z dimension value in the image coordinate system.
    """
class Image(mongoengine.EmbeddedDocument):
    """The image file encapsulation."""

    # Subclasses (e.g. LabelMap) are stored with a _cls discriminator.
    meta = dict(allow_inheritance=True)

    name = fields.StringField(required=True)
    """
    The image file base name. The client has the responsibility
    of determining the image file based on the image store. For example,
    a NIfTI scan volume XNAT 1.6 archive 3D volume image location is as
    follows::

        /path/to/archive/<project>/arc001/<experiment>/SCANS/<scan>/NIFTI/<name>

    where:

    * *project* is the XNAT project name

    * *experiment* is the XNAT experiment label

    * *scan* is the scan number

    * *name* is the volume image file base name
    """

    metadata = fields.DictField()
    """Additional image properties, e.g. average intensity."""
class LabelMap(Image):
    """A label map with an optional associated color lookup table."""

    color_table = fields.StringField()
    """
    The color map lookup table file base name relative to the XNAT
    archive ROI resource location.
    """
class Region(mongoengine.EmbeddedDocument):
    """The 3D region in volume voxel space."""

    mask = fields.EmbeddedDocumentField(Image)
    """
    The binary mask file relative to the image store ROI resource
    location.
    """

    resource = fields.StringField(required=True)
    """The region imaging store resource name, e.g. ``roi``."""

    label_map = fields.EmbeddedDocumentField(LabelMap)
    """The region overlay :class:`LabelMap` object."""

    # Declared via the fields alias for consistency with the other field
    # declarations in this module (mongoengine.EmbeddedDocumentField is the
    # same class).
    centroid = fields.EmbeddedDocumentField(Point)
    """The region centroid."""
class Resource(mongoengine.EmbeddedDocument):
    """The image store file access abstraction."""

    # Subclasses (SingleImageResource, MultiImageResource) are stored with
    # a _cls discriminator.
    meta = dict(allow_inheritance=True)

    name = fields.StringField(required=True)
    """The image store name used to access the resource."""
class SingleImageResource(Resource):
    """A resource with one file."""

    meta = dict(allow_inheritance=True)

    image = fields.EmbeddedDocumentField(Image)
    """The sole resource image."""
class MultiImageResource(Resource):
    """A resource with several files."""

    meta = dict(allow_inheritance=True)

    images = fields.ListField(
        field=fields.EmbeddedDocumentField(Image)
    )
    """The resource images."""
class ImageSequence(Outcome):
    """The Scan or Registration."""

    meta = dict(allow_inheritance=True)

    time_series = fields.EmbeddedDocumentField(SingleImageResource)
    """The 4D time series resource."""

    volumes = fields.EmbeddedDocumentField(MultiImageResource)
    """The 3D volumes resource."""
class Protocol(mongoengine.Document):
    """
    The image acquisition or processing protocol abstract class.
    """

    meta = dict(allow_inheritance=True, collection='qiprofile_protocol')

    technique = fields.StringField(required=True)
    """
    The acquisition or processing technique, e.g. ``T1`` for a
    T1-weighted scan or ``ANTs`` for an ANTs registration.

    The REST update client is responsible for ensuring that technique
    synonyms resolve to the same technique value, e.g. scans with
    descriptions including ``T1`` and ``T1 AXIAL`` should both resolve
    to technique ``T1``. The acquisition and processing details are
    stored in the configuration rather than embedded in the technique.

    Protocol common constraints
    ---------------------------
    Clients are required to enforce the following constraints:

    * Protocols are unique and immutable. Clients are required
      to search for an existing protocol with the same content prior
      to creating a new ``Protocol`` database object.

    * Protocol techniques are disjoint by referencing class, i.e.
      a technique value cannot occur in protocol database objects
      referenced by instances of different classes. For example,
      a ``Registration`` instance cannot reference a protocol
      with technique ``T2``, since that is a scan protocol reserved
      technique per the specialization constraints below.

    Protocol specialization constraints
    -----------------------------------
    **Scan**

    Scans with the same protocol and image dimensions are directly
    comparable, e.g. in comparing modeling results across subjects
    or sessions.

    The recommended technique controlled values include, but are not
    limited to, the following:

    * ``T1`` - T1-weighted

    * ``T2`` - T2-weighted

    * ``DW`` - diffusion-weighted

    * ``PD`` - proton density
    """

    configuration = fields.DictField()
    """
    The acquisition or processing input parameter
    {*section*\ : {*option*\ : *value*\ }} dictionary,
    e.g.::

        {
            'FLIRT': {'bins': 640, 'cost_func': 'normcorr'},
            'FNIRT' : {'in_fwhm': [10,6,2,2], 'ref_fwhm': [10,6,2,2]}
        }

    The acquisition configuration is gathered from the PACS and
    should contain parameters of interest for display or inference.
    The processing configuration is gathered from the pipeline and
    should be sufficient for reproducing the result in the context
    of the pipeline. The sections are pipeline task interfaces or
    other pipeline input groupings.

    :Note: MongoDB does not permit dotted dictionary keys. Thus, e.g.,
      'fsl.FNIRT' is not allowed in the preceding example.
    """
class Registration(ImageSequence):
    """
    The patient image registration that results from processing a scan.
    """

    protocol = fields.ReferenceField(Protocol, required=True)
    """The registration protocol."""
class Scan(ImageSequence):
    """
    The the concrete subclass of the abstract :class:`ImageSequence`
    class for scans.
    """

    number = fields.IntField(required=True)
    """
    The scan number. In the XNAT image store, each scan is
    identified by a number unique within the session.
    """

    protocol = fields.ReferenceField(Protocol, required=True)
    """The scan acquisition protocol."""

    bolus_arrival_index = fields.IntField()
    """
    The bolus arrival volume index, or None if this is not a
    DCE scan.
    """

    rois = fields.ListField(field=fields.EmbeddedDocumentField(Region))
    """
    The image regions of interest. For a scan with ROIs, there is
    one ROI per scan tumor. The rois list order is the same as the
    :class:`qirest-client.model.clinical.PathologyReport`
    ``tumors`` list order.
    """

    registrations = fields.ListField(
        field=fields.EmbeddedDocumentField(Registration)
    )
    """
    The registrations performed on the scan.
    """
class Modeling(Outcome):
    """
    The pharmicokinetic modeling run on an image sequence.
    """

    class ParameterResult(mongoengine.EmbeddedDocument):
        """The output for a given modeling run result parameter."""

        image = fields.EmbeddedDocumentField(Image)
        """
        The voxel-wise mapping file name relative to the XNAT
        modeling archive resource location. The image metadata
        should include *average*, the average modeling result
        value across all voxels.
        """

        label_map = fields.EmbeddedDocumentField(LabelMap)
        """The label map overlay NIfTI file."""

    class Source(mongoengine.EmbeddedDocument):
        """
        This Modeling.Source embedded class works around the following
        mongoengine limitation:

        * mongoengine does not allow heterogeneous collections, i.e.
          a domain model Document subclass cannot have subclasses.
          Furthermore, the domain model Document class cannot be
          an inner class.

        Consequently, the Modeling.source field cannot represent an
        abstract superclass with subclasses RegistrationSource
        and ScanSource. The work-around is to introduce this Source
        embedded document disambiguation by creating a disjunction
        object that can either hold a *scan* reference or a
        *registration* reference.
        """

        # Exactly one of the two references below is expected to be set
        # (the disjunction described in the class docstring).
        scan = fields.ReferenceField(Protocol)

        registration = fields.ReferenceField(Protocol)

    protocol = fields.ReferenceField(Protocol, required=True)
    """The modeling protocol."""

    source = fields.EmbeddedDocumentField(Source, required=True)
    """
    The modeling source protocol.

    Since a given :class`Session` contains only one :class:`ImageSequence`
    per source protocol, the image sequence on which modeling is performed
    is determined by the source protocol. Specifying the source as a
    protocol rather than the specific scan or registration allows modeling
    to be embedded in the :class`Session` document rather than the
    :class:`SessionDetail`.
    """

    resource = fields.StringField(required=True)
    """The modeling imaging store resource name, e.g. ``pk_R3y9``."""

    # Declared via the fields alias for consistency with the other field
    # declarations in this module (mongoengine.EmbeddedDocumentField is the
    # same class).
    result = fields.DictField(
        field=fields.EmbeddedDocumentField(ParameterResult)
    )
    """
    The modeling {*parameter*: *result*} dictionary, where:

    - *parameter* is the lower-case underscore parameter key, e.g.
      ``k_trans``.

    - *result* is the corresponding :class:`ParameterResult`

    The parameters are determined by the :class:`Protocol`
    technique. For example, the `OHSU QIN modeling workflow`_ includes
    the following outputs for the FXL (`Tofts standard`_) model and the
    FXR (`shutter speed`_) model:

    - *fxl_k_trans*, *fxr_k_trans*: the |Ktrans| vascular permeability
      transfer constant

    - *delta_k_trans*: the FXR-FXL |Ktrans| difference

    - *fxl_v_e*, *fxr_v_e*: the |ve| extravascular extracellular volume
      fraction

    - *fxr_tau_i*: the |taui| intracellular |H2O| mean lifetime

    - *fxl_chi_sq*, *fxr_chi_sq*: the |chisq| intensity goodness of fit

    The REST client is responsible for anticipating and interpreting the
    meaning of the *parameter* based on the modeling technique. For
    example, if the image store has a session modeling resource
    ``pk_h7Jtl`` which includes the following files::

        k_trans.nii.gz
        k_trans_overlay.nii.gz
        chi_sq.nii.gz
        chi_sq_overlay.nii.gz

    then a REST database update client might calculate the average |Ktrans|
    and |chisq| values and populate the REST database as follows::

        from qirest_client.helpers import database

        // The scan protocol.
        t1 = database.get_or_create(Protocol, dict(scan_type='T1'))
        // The modeling protocol.
        tofts = database.get_or_create(Protocol,
                                       dict(technique='Tofts'))
        // The modeling results.
        ktrans_label_map = LabelMap(filename='k_trans_overlay.nii.gz',
                                    color_table='jet.txt')
        ktrans = Modeling.ParameterResult(name='k_trans.nii.gz',
                                          average=k_trans_avg,
                                          label_map=ktrans_label_map)
        chisq_label_map = LabelMap(filename='k_trans_overlay.nii.gz',
                                   color_table='jet.txt')
        chisq = Modeling.ParameterResult(name='chi_sq.nii.gz',
                                         average=chi_sq_avg,
                                         label_map=chisq_label_map)
        result = dict(ktrans=ktrans, chisq=chisq)
        // The modeling object.
        session.modeling = Modeling(protocol=tofts, source=t1,
                                    resource='pk_h7Jtl', result=result)

    It is then the responsibility of an imaging web app REST read client
    to interpret the modeling result dictionary items and display them
    appropriately.

    .. reST substitutions:
    .. include:: <isogrk3.txt>
    .. |H2O| replace:: H\ :sub:`2`\ O
    .. |Ktrans| replace:: K\ :sup:`trans`
    .. |ve| replace:: v\ :sub:`e`
    .. |taui| replace:: |tau|\ :sub:`i`
    .. |chisq| replace:: |chi|\ :sup:`2`

    .. _OHSU QIN modeling workflow: http://qipipe.readthedocs.org/en/latest/api/pipeline.html#modeling
    .. _Tofts standard: http://onlinelibrary.wiley.com/doi/10.1002/(SICI)1522-2586(199909)10:3%3C223::AID-JMRI2%3E3.0.CO;2-S/abstract
    .. _shutter speed: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2582583
    """

    def __str__(self):
        return "Modeling %s" % self.resource
class SessionDetail(mongoengine.Document):
    """The MR session detailed content."""

    meta = dict(collection='qiprofile_session_detail')

    # Declared via the fields alias for consistency with the other field
    # declarations in this module (mongoengine.EmbeddedDocumentField is the
    # same class).
    scans = fields.ListField(field=fields.EmbeddedDocumentField(Scan))
    """The list of scans."""
class Session(Encounter):
    """The MR session (a.k.a. *study* in DICOM terminology)."""

    modelings = fields.ListField(
        field=fields.EmbeddedDocumentField(Modeling)
    )
    """The modeling performed on the session."""

    tumor_extents = fields.ListField(
        field=fields.EmbeddedDocumentField(TumorExtent)
    )
    """The tumor extents measured from the scan."""

    preview = fields.EmbeddedDocumentField(SingleImageResource)
    """
    The scan graphical display preview image resource object.
    The preview is a a Session resource rather than a Scan
    resource so that the client does not need to load the
    SessionDetail in order to access the preview image.
    """

    # Fix: this was `fields.DictField;`, which assigned the DictField class
    # itself (and carried a stray semicolon) instead of declaring a field.
    acquisition_parameters = fields.DictField()
    """
    The image acquisition parameters dictionary.
    The dictionary includes DICOM metadata that is common to
    all of the captured scan images.
    """

    detail = fields.ReferenceField(SessionDetail)
    """The session detail reference."""

    @classmethod
    def pre_delete(cls, sender, document, **kwargs):
        """Cascade delete this Session's detail.

        Signal handler wired up via signals.pre_delete.connect below;
        *document* is the Session instance being deleted.
        """
        # Fix: the body referenced the undefined name `self`; the deleted
        # instance is passed as the `document` signal argument.
        if document.detail:
            document.detail.delete()
signals.pre_delete.connect(Session.pre_delete, sender=Session)
| |
#!/usr/bin/python -u
"""Checks codeDeps dependencies are correctly declared."""
# Copyright 2011, 2012, 2013, 2014 Matt Shannon
# This file is part of codedep.
# See `License` for details of license and warranty.
import os
import sys
import argparse
import inspect
import _ast
import ast
import symtable
import importlib
def peekIter(itr):
    """Returns an iterator which can peek one element ahead.

    If the input iterator represents a sequence of current elements, then
    the returned iterator represents a sequence of (current, next) pairs,
    where next is None for the final element.  An empty input yields
    nothing.
    """
    itr = iter(itr)
    try:
        # FIX: was `itr.next()` (Python-2-only); the builtin next() works on
        # both Python 2.6+ and 3.
        elem = next(itr)
    except StopIteration:
        # FIX: explicitly return on empty input rather than letting
        # StopIteration escape inside a generator (an error under PEP 479).
        return
    done = False
    while not done:
        try:
            elemNext = next(itr)
        except StopIteration:
            elemNext = None
            done = True
        yield elem, elemNext
        elem = elemNext
def attachAstAndSymtab(nodes, symtab, depth=0):
    """Walks an AST and a symtable at the same time, linking them.

    A scope_depth attribute is added to each symtab giving the number of
    levels of nested scope above it.  A symtab_current_scope attribute is
    added to each AST node giving the symtab for the scope current at
    that node.
    """
    symtab.scope_depth = depth
    # Child scopes, reversed so they can be consumed by pop() in order.
    pendingChildScopes = symtab.get_children()[::-1]
    for node in nodes:
        attachAstAndSymtabSub(node, symtab, pendingChildScopes, depth=depth)
    # Every child scope must have been claimed by some AST node.
    assert not pendingChildScopes
def attachAstAndSymtabSub(node, symtab, symtabChildrenLeft, depth):
    """Link one AST node (and its subtree) to the matching symtable scopes.

    :param node: the AST node to process
    :param symtab: the symtable scope current at this node
    :param symtabChildrenLeft: reversed list of symtab's not-yet-consumed
        child scopes; each scope-introducing node pops one entry
    :param depth: nesting depth of symtab
    """
    node.symtab_current_scope = symtab
    # FIX: _ast.SetComp and _ast.DictComp were missing from this check, so
    # the elif branch below that handles them was unreachable and their
    # symtable child scopes were never consumed, which would trip the
    # `assert not symtabChildrenLeft` in attachAstAndSymtab.  (Set and dict
    # comprehensions introduce their own scope, unlike list comprehensions
    # in Python 2.)
    if isinstance(node, (_ast.FunctionDef, _ast.ClassDef, _ast.Lambda,
                         _ast.SetComp, _ast.DictComp, _ast.GeneratorExp)):
        # new scope introduced, and used in some of the children
        # (FIXME : order of descent into new scopes may not be correct (that
        #  is, may disagree with ordering used by symtable) for complicated
        #  nested scope cases. Need to think about.)
        if isinstance(node, _ast.FunctionDef):
            # defaults and decorators evaluate in the enclosing scope
            subNodesOldScope = node.args.defaults + node.decorator_list
            subNodesNewScope = node.args.args + node.body
            # node.args itself would otherwise be missed out
            node.args.symtab_current_scope = symtab
        elif isinstance(node, _ast.ClassDef):
            subNodesOldScope = node.bases + node.decorator_list
            subNodesNewScope = node.body
        elif isinstance(node, _ast.Lambda):
            subNodesOldScope = node.args.defaults
            subNodesNewScope = node.args.args + [node.body]
            # node.args itself would otherwise be missed out
            node.args.symtab_current_scope = symtab
        elif isinstance(node, (_ast.SetComp, _ast.DictComp,
                               _ast.GeneratorExp)):
            # the outermost iterable evaluates in the enclosing scope
            subNodesOldScope = [ subNode.iter for subNode in node.generators ]
            subNodesNewScope = (
                ([node.key, node.value] if isinstance(node, _ast.DictComp)
                 else [node.elt]) +
                [ subNode.target for subNode in node.generators ] +
                [ subSubNode
                  for subNode in node.generators
                  for subSubNode in subNode.ifs ]
            )
            # each node in node.generators would otherwise be missed out
            for subNode in node.generators:
                subNode.symtab_current_scope = symtab
        for subNode in subNodesOldScope:
            attachAstAndSymtabSub(subNode, symtab, symtabChildrenLeft,
                                  depth=depth)
        # claim the next unconsumed child scope for this node
        symtabChild = symtabChildrenLeft.pop()
        if isinstance(node, (_ast.FunctionDef, _ast.ClassDef)):
            assert symtabChild.get_name() == node.name
        attachAstAndSymtab(subNodesNewScope, symtabChild, depth=(depth + 1))
    else:
        # no new scope: recurse with the same symtab
        for subNode in ast.iter_child_nodes(node):
            attachAstAndSymtabSub(subNode, symtab, symtabChildrenLeft,
                                  depth=depth)
def isGlobal(symtab, symName):
    """Returns True if symbol referred to by symName in symtab is global.

    This works around the fact that Symbol.is_global() is False for
    module-level variables accessed from module-level.
    """
    # Look the symbol up first so unknown names raise, as before.
    sym = symtab.lookup(symName)
    if symtab.get_type() == 'module':
        return True
    return sym.is_global()
def findGlobalUses(node, onLoadGlobalFound):
    """Walk node's subtree, calling onLoadGlobalFound for each global load.

    Names appearing inside a codeDeps(...) call are ignored.  For
    attribute access on a global name, the dotted 'name.attr' form is
    reported on loads and the bare name otherwise.
    """
    scope = node.symtab_current_scope
    isCodeDepsCall = (isinstance(node, _ast.Call) and
                      isinstance(node.func, _ast.Name) and
                      node.func.id == 'codeDeps')
    if isCodeDepsCall:
        # ignore names present in arguments to codeDeps
        for child in ast.iter_child_nodes(node):
            findGlobalUses(child, lambda name: ())
    elif isinstance(node, _ast.Name) and isGlobal(scope, node.id):
        if isinstance(node.ctx, (_ast.Load, _ast.AugLoad)):
            onLoadGlobalFound(node.id)
        # no children
    elif (isinstance(node, _ast.Attribute) and
          isinstance(node.value, _ast.Name) and
          isGlobal(scope, node.value.id)):
        if isinstance(node.ctx, (_ast.Load, _ast.AugLoad)):
            onLoadGlobalFound(node.value.id + '.' + node.attr)
        else:
            onLoadGlobalFound(node.value.id)
        # all children have already been dealt with
    else:
        for child in ast.iter_child_nodes(node):
            findGlobalUses(child, onLoadGlobalFound)
def assignsNames(node):
    """Return the list of module-level names bound by this statement.

    Covers def/class statements and plain-name assignment targets;
    anything else binds nothing.
    """
    if isinstance(node, (_ast.FunctionDef, _ast.ClassDef)):
        return [node.name]
    if isinstance(node, _ast.Assign):
        boundNames = []
        for target in node.targets:
            if isinstance(target, _ast.Name):
                assert isinstance(target.ctx, _ast.Store)
                boundNames.append(target.id)
        return boundNames
    return []
def simpleAssignToName(node):
    """If a simple assignment to a name, return name, otherwise None.

    A simple assignment is `name = <expr>` with exactly one plain-name
    target (no tuple unpacking, no chained targets).
    """
    if not isinstance(node, _ast.Assign) or len(node.targets) != 1:
        return None
    target = node.targets[0]
    return target.id if isinstance(target, _ast.Name) else None
def prettyPrintBisqueDepsStanza(deps, init='@', maxLineLength=80):
    """Format a codeDeps(...) stanza, wrapping when it exceeds maxLineLength.

    :param deps: list of dependency name strings
    :param init: prefix for the first line (e.g. '@' for a decorator)
    :param maxLineLength: soft wrap limit in characters
    """
    if not deps:
        return '%scodeDeps()' % init

    # Try the single-line form first.
    oneLine = '%scodeDeps(%s)' % (init, ', '.join(deps))
    if len(oneLine) <= maxLineLength:
        return oneLine

    # Wrap: accumulate completed lines in `out`, build the current line
    # dep by dep, then strip the trailing comma and close the paren.
    out = ''
    line = '%scodeDeps(%s,' % (init, deps[0])
    for dep in deps[1:]:
        if len(line) + len(dep) + 2 <= maxLineLength:
            line += (' %s,' % dep)
        else:
            out += (line + '\n')
            line = ' %s,' % dep
    return out + line[:-1] + '\n)'
def getSrcRootDirs(moduleNames):
    """Return the absolute source directory of each named module.

    :param moduleNames: iterable of importable module names
    :return: list of directories, one per module, in input order
    """
    rootDirs = []
    for name in moduleNames:
        mod = importlib.import_module(name)
        srcFile = os.path.abspath(inspect.getsourcefile(mod))
        rootDirs.append(os.path.dirname(srcFile))
    return rootDirs
def main(argv):
    """Check and rewrite codeDeps stanzas for one module.

    Parses the module with ast and symtable, computes the set of
    within-root global names each top-level statement uses, and prints
    the module source to stdout with codeDeps(...) stanzas inserted or
    replaced.  Progress and diagnostics go to stderr.

    NOTE(review): this is Python 2 code throughout (`print` statements,
    the `file()` builtin); it will not run unmodified under Python 3.
    """
    parser = argparse.ArgumentParser(
        description='Checks codeDeps dependencies are correctly declared.',
    )
    parser.add_argument(
        '--inc_deps_on', dest='depModuleNames', metavar='DMOD',
        action='append', default=[],
        # FIXME : explain what target directory means
        help=('adds the directory containing module DMOD to the list of target'
              ' directories (option can be repeated)')
    )
    parser.add_argument(
        'moduleName', metavar='MOD',
        help='name of module to check (e.g. "foo.bar")'
    )
    args = parser.parse_args(argv[1:])

    # Directories containing dependency modules; only names whose source
    # lives under one of these count as "within root".
    srcRootDirs = getSrcRootDirs(args.depModuleNames)
    for depModuleName, srcRootDir in zip(args.depModuleNames, srcRootDirs):
        sys.stderr.write('(adding srcRootDir = %s for module %s)\n' %
                         (srcRootDir, depModuleName))

    # Import the module under check and read its source verbatim.
    module = importlib.import_module(args.moduleName)
    moduleFile = os.path.abspath(inspect.getsourcefile(module))
    moduleFileContents = file(moduleFile).read()  # NOTE: Python 2 `file` builtin
    moduleFileLines = moduleFileContents.split('\n')
    # requires the file to end with a newline
    assert moduleFileLines[-1] == ''
    moduleFileLines = moduleFileLines[:-1]
    sys.stderr.write('(checking module %s from %s)\n' % (args.moduleName, moduleFile))
    nodeModule = ast.parse(moduleFileContents, moduleFile, 'exec')
    symtab = symtable.symtable(moduleFileContents, moduleFile, 'exec')
    attachAstAndSymtab(nodeModule.body, symtab)

    sys.stderr.write('\n')
    sys.stderr.write('FINDING GLOBALS:\n')
    # loadGlobalss[i] is the sorted list of global names loaded by the
    # i-th top-level statement.
    loadGlobalss = []
    for node in nodeModule.body:
        loadGlobals = []
        def onLoadGlobalFound(name):
            if '.' in name:
                nameLeft, _ = name.split('.', 1)
                # evaluate the left part in the module's namespace to see
                # whether it is a module (keep dotted form) or an object
                # (keep only the bare name)
                nameLeftObj = eval(nameLeft, vars(module))
                if inspect.ismodule(nameLeftObj):
                    loadGlobals.append(name)
                else:
                    loadGlobals.append(nameLeft)
            else:
                loadGlobals.append(name)
        findGlobalUses(node, onLoadGlobalFound)
        loadGlobals = sorted(set(loadGlobals))
        loadGlobalss.append(loadGlobals)

    sys.stderr.write('\n')
    sys.stderr.write('REWIRING DEPS FOR PRIVATE VARIABLES:\n')
    # privateDeps maps a private variable name (single leading underscore)
    # to the deps of the statement that defined it, so uses of the private
    # name can be replaced by those deps.
    privateDeps = dict()
    for nodeIndex, node in enumerate(nodeModule.body):
        # expand any private variables which are in loadGlobals for this node
        loadGlobals = loadGlobalss[nodeIndex]
        newLoadGlobals = set()
        for name in loadGlobals:
            if name.startswith('_') and not name.startswith('__'):
                if name in privateDeps:
                    newLoadGlobals.update(privateDeps[name])
                else:
                    newLoadGlobals.add(name)
                    sys.stderr.write('NOTE: treating %s as non-private\n' %
                                     name)
            else:
                newLoadGlobals.add(name)
        loadGlobalss[nodeIndex] = sorted(newLoadGlobals)
        # add current node to privateDeps if appropriate
        nameAssignedTo = simpleAssignToName(node)
        if nameAssignedTo is not None and nameAssignedTo.startswith('_'):
            # statement is simple assignment to a private variable,
            # i.e. of the form '_bla = ...'
            privateDeps[nameAssignedTo] = loadGlobals
            sys.stderr.write('will expand %s to %s\n' %
                             (nameAssignedTo, loadGlobals))

    sys.stderr.write('\n')
    sys.stderr.write('RESOLVING LOCATIONS:\n')
    namesDefinedInModule = set([ subNode
                                 for node in nodeModule.body
                                 for subNode in assignsNames(node) ])
    # all global names loaded anywhere in the module
    names = set()
    for node, loadGlobals in zip(nodeModule.body, loadGlobalss):
        names.update(loadGlobals)
    # names whose defining source lives in this module or under a src root
    namesWithinRoot = set()
    for name in names:
        if name in namesDefinedInModule:
            namesWithinRoot.add(name)
        elif name in ('True', 'False'):
            pass
        else:
            if '.' in name:
                nameLeft, _ = name.split('.', 1)
                nameLeftObj = eval(nameLeft, vars(module))
                assert inspect.ismodule(nameLeftObj)
                if hasattr(nameLeftObj, '__file__'):
                    sourceFileRel = inspect.getsourcefile(nameLeftObj)
                else:
                    # built-in module (according to code in inspect.py)
                    sourceFileRel = None
            else:
                try:
                    nameObj = eval(name, vars(module))
                except NameError:
                    sys.stderr.write('NOTE: %s refers to nothing'
                                     ' (ignoring)\n' % name)
                    nameObj = None
                # NOTE(review): a name legitimately bound to None is
                # indistinguishable from "refers to nothing" here.
                if nameObj is None:
                    sourceFileRel = None
                elif True and (inspect.isbuiltin(nameObj) or
                               (getattr(nameObj, '__module__', None) ==
                                '__builtin__')):
                    sourceFileRel = None
                elif True and (inspect.ismodule(nameObj) and
                               not hasattr(nameObj, '__file__')):
                    # built-in module (according to code in inspect.py)
                    sourceFileRel = None
                elif True and (inspect.isclass(nameObj) and
                               not hasattr(sys.modules.get(nameObj.__module__),
                                           '__file__')):
                    # built-in class (according to code in inspect.py)
                    sourceFileRel = None
                else:
                    try:
                        sourceFileRel = inspect.getsourcefile(nameObj)
                    except TypeError:
                        sourceFileRel = None
                        sys.stderr.write('NOTE: %s had no source file\n' %
                                         name)
            if sourceFileRel is not None:
                sourceFile = os.path.abspath(sourceFileRel)
                # within root iff the source file is under some src root dir
                if any([
                    os.path.commonprefix([srcRootDir,
                                          sourceFile]) == srcRootDir
                    for srcRootDir in srcRootDirs
                ]):
                    namesWithinRoot.add(name)

    sys.stderr.write('\n')
    sys.stderr.write('RESULTS:\n')
    sys.stderr.write('\n')
    # names defined later in the module than the current statement need a
    # ForwardRef wrapper; this set shrinks as definitions are emitted
    namesNotYetDefined = set(namesDefinedInModule)
    def nameToString(name):
        if name in namesNotYetDefined:
            return 'ForwardRef(lambda: %s)' % name
        else:
            return name
    # print stuff which occurs before the first node
    bodyStartLine = ((nodeModule.body[0].lineno - 1) if nodeModule.body
                     else len(moduleFileLines))
    for line in moduleFileLines[:bodyStartLine]:
        print line
    # emit each top-level statement, inserting/replacing codeDeps stanzas
    for (node, nextNode), loadGlobals in zip(peekIter(nodeModule.body),
                                             loadGlobalss):
        # (module docstrings which last more than one line seem to have
        #  col_offset -1)
        assert node.col_offset == 0 or node.col_offset == -1
        if nextNode is not None:
            assert nextNode.col_offset == 0
        startLine = node.lineno - 1
        endLine = ((nextNode.lineno - 1) if nextNode is not None
                   else len(moduleFileLines))
        assert 0 <= startLine < endLine <= len(moduleFileLines)
        sourceLines = moduleFileLines[startLine:endLine]
        if isinstance(node, (_ast.FunctionDef, _ast.ClassDef)):
            sortedDeps = [ nameToString(name)
                           for name in loadGlobals
                           if name in namesWithinRoot and name != node.name ]
            # work around the fact there is no completely reliable way to get
            # the line number of the first non-decorator line from the AST
            # alone
            defLineOffset = None
            for lineOffset, line in enumerate(sourceLines):
                if line.startswith('def ') or line.startswith('class '):
                    defLineOffset = lineOffset
                    break
            assert defLineOffset is not None
            assert all([ (dec.lineno - 1) < startLine + defLineOffset
                         for dec in node.decorator_list ])
            codeDepsPrinted = False
            # print decorator lines
            for subNode, nextSubNode in peekIter(node.decorator_list):
                if True and (isinstance(subNode, _ast.Call) and
                             isinstance(subNode.func, _ast.Name) and
                             subNode.func.id == 'codeDeps'):
                    # print new codeDeps stanza in place of old one
                    print prettyPrintBisqueDepsStanza(sortedDeps)
                    codeDepsPrinted = True
                else:
                    startSubOffset = subNode.lineno - 1 - startLine
                    endSubOffset = ((nextSubNode.lineno - 1 - startLine)
                                    if nextSubNode is not None
                                    else defLineOffset)
                    assert (
                        0 <= startSubOffset < endSubOffset <= len(sourceLines)
                    )
                    for line in sourceLines[startSubOffset:endSubOffset]:
                        print line
            if not codeDepsPrinted:
                # print codeDeps stanza just before def
                print prettyPrintBisqueDepsStanza(sortedDeps)
                codeDepsPrinted = True
            # print rest of function / class
            for line in sourceLines[defLineOffset:]:
                print line
        else:
            sortedDeps = [ nameToString(name)
                           for name in loadGlobals
                           if name in namesWithinRoot ]
            if sortedDeps:
                nameAssignedTo = simpleAssignToName(node)
                if nameAssignedTo != None and nameAssignedTo.startswith('_'):
                    # node is a simple assignment to a private variable, so
                    # don't need a codeDeps line
                    for line in sourceLines:
                        print line
                else:
                    # NOTE(review): `and node` below looks redundant — node is
                    # always truthy here; presumably a leftover — confirm.
                    if nameAssignedTo != None and node:
                        nodeValue = node.value
                        hadToRemoveExisting = False
                        if True and (isinstance(nodeValue, _ast.Call) and
                                     isinstance(nodeValue.func, _ast.Call) and
                                     isinstance(nodeValue.func.func,
                                                _ast.Name) and
                                     nodeValue.func.func.id == 'codeDeps'):
                            # remove existing codeDeps stanza
                            # (FIXME : assumes there is an arg (and that it
                            #  comes before kwargs, etc, but I think that's
                            #  safe))
                            nodeValue = nodeValue.args[0]
                            hadToRemoveExisting = True
                        currLineOffset = nodeValue.lineno - 1 - startLine
                        restOfCurrLine = (
                            sourceLines[currLineOffset][nodeValue.col_offset:]
                        )
                        restOfLines = sourceLines[(currLineOffset + 1):]
                        if not hadToRemoveExisting:
                            # if codeDeps is already present, then user must
                            # have added, so no need to warn
                            print ('# FIXME : to examine manually (assumes'
                                   ' original RHS is function or class)')
                        print prettyPrintBisqueDepsStanza(
                            sortedDeps,
                            init=(nameAssignedTo+' = ')
                        )+'('
                        print ' %s' % restOfCurrLine
                        for line in restOfLines:
                            print line
                        if not hadToRemoveExisting:
                            print ')'
                    else:
                        print ('# FIXME : to examine manually --'
                               ' codeDeps(%s)' % (', '.join(sortedDeps)))
                        for line in sourceLines:
                            print line
            else:
                for line in sourceLines:
                    print line
        # names bound by this statement are now defined
        namesNotYetDefined.difference_update(assignsNames(node))
# Script entry point: main() itself strips argv[0].
if __name__ == '__main__':
    main(sys.argv)
| |
import unittest
from cpuinfo import *
import helpers
class MockDataSource(object):
    """Stand-in DataSource serving captured ODROID-C2 (aarch64) output.

    Monkey-patched over cpuinfo's real DataSource by the test fixture so
    the parsers run against fixed, known command output.
    """

    # Static system facts reported without shelling out.
    bits = '64bit'
    cpu_count = 4
    is_windows = False
    arch_string_raw = 'aarch64'
    uname_string_raw = 'x86_64'
    can_cpuid = False

    @staticmethod
    def has_proc_cpuinfo():
        # Pretend /proc/cpuinfo is available.
        return True

    @staticmethod
    def has_lscpu():
        # Pretend the lscpu binary is available.
        return True

    @staticmethod
    def has_cpufreq_info():
        # Pretend the cpufreq-info binary is available.
        return True

    @staticmethod
    def cat_proc_cpuinfo():
        """Return (returncode, output) as if `cat /proc/cpuinfo` ran."""
        returncode = 0
        output = r'''
processor : 0
BogoMIPS : 2.00
Features : fp asimd crc32
CPU implementer : 0x41
CPU architecture: 8
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 1
BogoMIPS : 2.00
Features : fp asimd crc32
CPU implementer : 0x41
CPU architecture: 8
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 2
BogoMIPS : 2.00
Features : fp asimd crc32
CPU implementer : 0x41
CPU architecture: 8
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 3
BogoMIPS : 2.00
Features : fp asimd crc32
CPU implementer : 0x41
CPU architecture: 8
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
Hardware : ODROID-C2
Revision : 020c
'''
        return returncode, output

    @staticmethod
    def lscpu():
        """Return (returncode, output) as if `lscpu` ran."""
        returncode = 0
        output = r'''
Architecture: aarch64
Byte Order: Little Endian
CPU(s): 4
On-line CPU(s) list: 0-3
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s): 4
CPU max MHz: 1536.0000
CPU min MHz: 100.0000
'''
        return returncode, output

    @staticmethod
    def cpufreq_info():
        """Return (returncode, output) as if `cpufreq-info` ran."""
        returncode = 0
        output = r'''
cpufrequtils 008: cpufreq-info (C) Dominik Brodowski 2004-2009
Report errors and bugs to cpufreq@vger.kernel.org, please.
analyzing CPU 0:
driver: meson_cpufreq
CPUs which run at the same hardware frequency: 0 1 2 3
CPUs which need to have their frequency coordinated by software: 0 1 2 3
maximum transition latency: 200 us.
hardware limits: 100.0 MHz - 1.54 GHz
available frequency steps: 100.0 MHz, 250 MHz, 500 MHz, 1000 MHz, 1.30 GHz, 1.54 GHz
available cpufreq governors: hotplug, interactive, conservative, ondemand, userspace, powersave, performance
current policy: frequency should be within 100.0 MHz and 1.54 GHz.
The governor "interactive" may decide which speed to use
within this range.
current CPU frequency is 1.54 GHz.
cpufreq stats: 100.0 MHz:0.00%, 250 MHz:0.00%, 500 MHz:0.00%, 1000 MHz:0.00%, 1.30 GHz:0.00%, 1.54 GHz:100.00% (439)
analyzing CPU 1:
driver: meson_cpufreq
CPUs which run at the same hardware frequency: 0 1 2 3
CPUs which need to have their frequency coordinated by software: 0 1 2 3
maximum transition latency: 200 us.
hardware limits: 100.0 MHz - 1.54 GHz
available frequency steps: 100.0 MHz, 250 MHz, 500 MHz, 1000 MHz, 1.30 GHz, 1.54 GHz
available cpufreq governors: hotplug, interactive, conservative, ondemand, userspace, powersave, performance
current policy: frequency should be within 100.0 MHz and 1.54 GHz.
The governor "interactive" may decide which speed to use
within this range.
current CPU frequency is 1.54 GHz.
cpufreq stats: 100.0 MHz:0.00%, 250 MHz:0.00%, 500 MHz:0.00%, 1000 MHz:0.00%, 1.30 GHz:0.00%, 1.54 GHz:100.00% (439)
analyzing CPU 2:
driver: meson_cpufreq
CPUs which run at the same hardware frequency: 0 1 2 3
CPUs which need to have their frequency coordinated by software: 0 1 2 3
maximum transition latency: 200 us.
hardware limits: 100.0 MHz - 1.54 GHz
available frequency steps: 100.0 MHz, 250 MHz, 500 MHz, 1000 MHz, 1.30 GHz, 1.54 GHz
available cpufreq governors: hotplug, interactive, conservative, ondemand, userspace, powersave, performance
current policy: frequency should be within 100.0 MHz and 1.54 GHz.
The governor "interactive" may decide which speed to use
within this range.
current CPU frequency is 1.54 GHz.
cpufreq stats: 100.0 MHz:0.00%, 250 MHz:0.00%, 500 MHz:0.00%, 1000 MHz:0.00%, 1.30 GHz:0.00%, 1.54 GHz:100.00% (439)
analyzing CPU 3:
driver: meson_cpufreq
CPUs which run at the same hardware frequency: 0 1 2 3
CPUs which need to have their frequency coordinated by software: 0 1 2 3
maximum transition latency: 200 us.
hardware limits: 100.0 MHz - 1.54 GHz
available frequency steps: 100.0 MHz, 250 MHz, 500 MHz, 1000 MHz, 1.30 GHz, 1.54 GHz
available cpufreq governors: hotplug, interactive, conservative, ondemand, userspace, powersave, performance
current policy: frequency should be within 100.0 MHz and 1.54 GHz.
The governor "interactive" may decide which speed to use
within this range.
current CPU frequency is 1.54 GHz.
cpufreq stats: 100.0 MHz:0.00%, 250 MHz:0.00%, 500 MHz:0.00%, 1000 MHz:0.00%, 1.30 GHz:0.00%, 1.54 GHz:100.00% (439)
'''
        return returncode, output
class TestLinux_Odroid_C2_Aarch_64(unittest.TestCase):
    """cpuinfo parser tests against captured ODROID-C2 (aarch64) output."""

    def setUp(self):
        # Swap the real DataSource for MockDataSource before each test.
        helpers.backup_data_source(cpuinfo)
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource)

    def tearDown(self):
        # Restore the real DataSource so other test modules are unaffected.
        helpers.restore_data_source(cpuinfo)

    '''
    Make sure calls return the expected number of fields.
    '''
    def test_returns(self):
        # Platform-inapplicable sources must return empty dicts; the three
        # Linux sources return their parsed field counts.
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
        self.assertEqual(4, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
        self.assertEqual(4, len(cpuinfo._get_cpu_info_from_lscpu()))
        self.assertEqual(2, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_dmesg()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
        self.assertEqual(13, len(cpuinfo._get_cpu_info_internal()))

    def test_get_cpu_info_from_cpufreq_info(self):
        # cpufreq-info reports the current 1.54 GHz frequency.
        info = cpuinfo._get_cpu_info_from_cpufreq_info()
        self.assertEqual('1.5400 GHz', info['hz_advertised_friendly'])
        self.assertEqual('1.5400 GHz', info['hz_actual_friendly'])
        self.assertEqual((1540000000, 0), info['hz_advertised'])
        self.assertEqual((1540000000, 0), info['hz_actual'])

    def test_get_cpu_info_from_lscpu(self):
        # lscpu reports the 1536 MHz maximum frequency.
        info = cpuinfo._get_cpu_info_from_lscpu()
        self.assertEqual('1.5360 GHz', info['hz_advertised_friendly'])
        self.assertEqual('1.5360 GHz', info['hz_actual_friendly'])
        self.assertEqual((1536000000, 0), info['hz_advertised'])
        self.assertEqual((1536000000, 0), info['hz_actual'])

    def test_get_cpu_info_from_proc_cpuinfo(self):
        # /proc/cpuinfo yields the hardware name and sorted feature flags.
        info = cpuinfo._get_cpu_info_from_proc_cpuinfo()
        self.assertEqual('ODROID-C2', info['hardware_raw'])
        self.assertEqual(
            ['asimd', 'crc32', 'fp'],
            info['flags']
        )

    def test_all(self):
        # The merged result combines all sources; cpufreq-info's frequency
        # takes precedence over lscpu's.
        info = cpuinfo._get_cpu_info_internal()
        self.assertEqual('ODROID-C2', info['hardware_raw'])
        self.assertEqual('1.5400 GHz', info['hz_advertised_friendly'])
        self.assertEqual('1.5400 GHz', info['hz_actual_friendly'])
        self.assertEqual((1540000000, 0), info['hz_advertised'])
        self.assertEqual((1540000000, 0), info['hz_actual'])
        self.assertEqual('ARM_8', info['arch'])
        self.assertEqual(64, info['bits'])
        self.assertEqual(4, info['count'])
        self.assertEqual('aarch64', info['arch_string_raw'])
        self.assertEqual(
            ['asimd', 'crc32', 'fp'],
            info['flags']
        )
| |
import pytest
import time
from .common import create_kubeconfig
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import PROJECT_MEMBER
from .common import PROJECT_OWNER
from .common import PROJECT_READ_ONLY
from .common import get_client_for_token
from .common import delete_node
from .common import get_node_details
from .common import get_user_client
from .common import get_user_client_and_cluster
from .common import execute_kubectl_cmd
from .common import if_test_rbac
from .common import random_name
from .common import random_test_name
from .common import rbac_get_user_token_by_role
from .common import validate_cluster_state
from .common import wait_for_condition
from .conftest import wait_for_cluster_delete
from rancher import ApiError
from .test_rke_cluster_provisioning import DO_ACCESSKEY
from .test_rke_cluster_provisioning import evaluate_clustername
from .test_rke_cluster_provisioning import get_custom_host_registration_cmd
from .test_rke_cluster_provisioning import HOST_NAME
from .test_rke_cluster_provisioning import random_node_name
from .test_rke_cluster_provisioning import rke_config
from .test_rke_cluster_provisioning import wait_for_cluster_node_count
from lib.aws import AmazonWebServices
# Shared mutable state populated by fixtures and consumed by the tests below.
# cluster_detail: the pre-provisioned cluster and API client used by most tests.
cluster_detail = {"cluster": None, "client": None}
# Node-template-driven cluster plus the label under test.
cluster_node_template = {"cluster": None, "node_pools": None,
                         "node_template": None, "do_cloud_credential": None,
                         "label_value": None, "test_label": None}
# Custom (registration-command) cluster state, including its AWS node.
cluster_custom = {"cluster": None, "test_label": None,
                  "label_value": None, "aws_node": None}
# Custom cluster used by the add/edit-node scenario; may hold several AWS nodes.
custom_cluster_add_edit = {"cluster": None, "aws_node": []}
# Parallel lists of clusters/templates created per test, for teardown.
cluster_node_template_2 = {"cluster": [], "node_template": []}
# RBAC roles exercised by the permission tests.
roles = [CLUSTER_MEMBER, CLUSTER_OWNER, PROJECT_OWNER, PROJECT_MEMBER,
         PROJECT_READ_ONLY]
def test_node_label_add():
    """Add a label to a node via the API, verify it, then clean it up."""
    label_key = random_name()
    label_val = random_name()
    # pick a node from the pre-provisioned cluster
    client, node = get_node_details(cluster_detail["cluster"],
                                    cluster_detail["client"])
    # attach the label through the API
    labels = node.labels.data_dict()
    labels[label_key] = label_val
    client.update(node, labels=labels)
    # the label must appear on the node
    wait_for_condition(client, node, check_label_added(label_key), None, 10)
    validate_label_set_on_node(client, node, label_key, label_val)
    # clean up: remove the label again
    del labels[label_key]
    client.update(node, labels=labels)
def test_node_label_edit():
    """Add a node label via the API, edit its value via the API, then clean up."""
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    # add label through API
    node_labels = node.labels.data_dict()
    node_labels[test_label] = label_value
    client.update(node, labels=node_labels)
    time.sleep(2)
    # Label should be added
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # edit label through API (reload first to avoid a stale update)
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    new_value = random_name()
    node_labels[test_label] = new_value
    client.update(node, labels=node_labels)
    node = client.reload(node)
    time.sleep(2)
    # Label should carry the new value
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, new_value)
    # delete label
    del node_labels[test_label]
    client.update(node, labels=node_labels)
def test_node_label_delete():
    """Add a node label via the API, delete it, and verify it is gone."""
    label_key = random_name()
    label_val = random_name()
    # pick a node from the pre-provisioned cluster
    client, node = get_node_details(cluster_detail["cluster"],
                                    cluster_detail["client"])
    # set the label through the API
    labels = node.labels.data_dict()
    labels[label_key] = label_val
    client.update(node, labels=labels)
    time.sleep(2)
    # the label must appear on the node
    wait_for_condition(client, node, check_label_added(label_key), None, 10)
    validate_label_set_on_node(client, node, label_key, label_val)
    # remove the label through the API
    del labels[label_key]
    client.update(node, labels=labels)
    time.sleep(2)
    # the label must be gone from the node
    wait_for_condition(client, node, check_label_removed(label_key), None, 10)
    validate_label_deleted_on_node(client, node, label_key)
def test_node_label_kubectl_add():
    """Add a node label via kubectl, verify it, then remove it via the API."""
    label_key = random_name()
    label_val = random_name()
    # pick a node from the pre-provisioned cluster
    client, node = get_node_details(cluster_detail["cluster"],
                                    cluster_detail["client"])
    node_name = node.nodeName
    # set the label with kubectl
    command = "label nodes " + node_name + " " + label_key + "=" + label_val
    print(command)
    execute_kubectl_cmd(command, False)
    time.sleep(2)
    # the label must appear on the node
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(label_key), None, 10)
    validate_label_set_on_node(client, node, label_key, label_val)
    # clean up: remove the label through the API
    node = client.reload(node)
    labels = node.labels.data_dict()
    del labels[label_key]
    client.update(node, labels=labels)
def test_node_label_kubectl_edit():
    """Add a node label via kubectl, edit it via kubectl --overwrite, then clean up."""
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    # add label on node
    command = "label nodes " + node_name + " " + test_label + "=" + label_value
    print(command)
    execute_kubectl_cmd(command, False)
    time.sleep(2)
    # Label should be added
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # edit label through kubectl (--overwrite is required to change a value)
    new_value = random_name()
    command = "label nodes " + node_name + " " + \
              test_label + "=" + new_value + " --overwrite"
    print(command)
    execute_kubectl_cmd(command, False)
    node = client.reload(node)
    time.sleep(2)
    # New Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value=new_value)
    # remove label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
def test_node_label_kubectl_delete():
    """Add a node label via kubectl, delete it via `kubectl label <key>-`, verify removal."""
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    # add label on node
    command = "label nodes " + node_name + " " + test_label + "=" + label_value
    print(command)
    execute_kubectl_cmd(command, False)
    time.sleep(2)
    # Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # remove label through kubectl (trailing '-' deletes the key)
    command = " label node " + node_name + " " + test_label + "-"
    execute_kubectl_cmd(command, False)
    time.sleep(2)
    # label should be deleted
    wait_for_condition(client, node, check_label_removed(test_label), None, 10)
    validate_label_deleted_on_node(client, node, test_label)
def test_node_label_k_add_a_delete_k_add():
    """Add via kubectl, Delete via API, Add via kubectl"""
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    command = "label nodes " + node_name + " " + test_label + "=" + label_value
    print(command)
    execute_kubectl_cmd(command, False)
    # Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # delete label through the API (reload first to pick up kubectl's change)
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
    # label should be deleted
    wait_for_condition(client, node, check_label_removed(test_label), None, 10)
    validate_label_deleted_on_node(client, node, test_label)
    # Add label via kubectl (reuses the same command string)
    execute_kubectl_cmd(command, False)
    # Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # clean up label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
def test_node_label_k_add_a_edit_k_edit():
    """Add via kubectl, edit via API, edit via kubectl"""
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    command = "label nodes " + node_name + " " + test_label + "=" + label_value
    execute_kubectl_cmd(command, False)
    # Label should be added
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # edit label through API (reload first to pick up kubectl's change)
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    new_value = random_name()
    node_labels[test_label] = new_value
    client.update(node, labels=node_labels)
    time.sleep(2)
    # Label should carry the API-set value
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, new_value)
    # edit label through kubectl (--overwrite is required to change a value)
    new_value_2 = random_name()
    command = "label nodes " + node_name + " " + \
              test_label + "=" + new_value_2 + " --overwrite"
    print(command)
    execute_kubectl_cmd(command, False)
    time.sleep(2)
    # Label should carry the kubectl-set value
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, new_value_2)
    # remove label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
def test_node_label_a_add_k_delete_a_add():
    """Add via API, Delete via kubectl, Add via API"""
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    node_labels = node.labels.data_dict()
    node_labels[test_label] = label_value
    client.update(node, labels=node_labels)
    time.sleep(2)
    # Label should be added
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # delete label via kubectl (trailing '-' deletes the key)
    command = " label node " + node_name + " " + test_label + "-"
    execute_kubectl_cmd(command, False)
    time.sleep(2)
    # label should be deleted
    node = client.reload(node)
    wait_for_condition(client, node, check_label_removed(test_label), None, 10)
    validate_label_deleted_on_node(client, node, test_label)
    # Add label via API (reload first to pick up kubectl's change)
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    node_labels[test_label] = label_value
    client.update(node, labels=node_labels)
    time.sleep(2)
    # Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # clean up label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
def test_node_label_a_add_k_edit_a_edit():
    """Add via API, Edit via kubectl, Edit via API.

    Validates that a node label's value can be changed from both
    management paths: set via the Rancher API, overwritten with
    `kubectl label ... --overwrite`, then overwritten again via the API,
    verifying the effective value after each edit.
    """
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    node_labels = node.labels.data_dict()
    node_labels[test_label] = label_value
    client.update(node, labels=node_labels)
    time.sleep(2)  # give the label update a moment to propagate
    # Label should be added
    node = client.reload(node)
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value)
    # edit label through kubectl; --overwrite is required to change an
    # existing label's value
    new_value = random_name()
    command = "label nodes " + node_name + " " + \
              test_label + "=" + new_value + " --overwrite"
    print(command)
    execute_kubectl_cmd(command, False)
    node = client.reload(node)
    time.sleep(2)
    # New Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, label_value=new_value)
    # edit label through API
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    new_value_2 = random_name()
    node_labels[test_label] = new_value_2
    client.update(node, labels=node_labels)
    node = client.reload(node)
    time.sleep(2)
    # Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, new_value_2)
    # clean up label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
def test_node_label_custom_add_edit_addnode():
    """Create a custom cluster with a node label passed to the register
    command, edit ONLY the label's value via the API on the first node,
    then add a control plane node carrying the ORIGINAL label value and
    verify each node ends up with the value it was given:
    the edited node keeps the new value, the added node the original one.
    Created resources are stashed in `custom_cluster_add_edit` so the
    module fixture can clean them up.
    """
    test_label = random_name()
    label_value = random_name()
    cluster_custom["test_label"] = test_label
    cluster_custom["label_value"] = label_value
    client = cluster_detail["client"]
    node_roles = [["worker", "controlplane", "etcd"]]
    aws_nodes_list = []
    cluster, aws_nodes = \
        create_custom_node_label(node_roles, test_label, label_value, True)
    create_kubeconfig(cluster)
    for node in aws_nodes:
        aws_nodes_list.append(node)
    nodes = client.list_node(clusterId=cluster.id).data
    node = nodes[0]
    validate_label_set_on_node(client, node, test_label, label_value)
    node_name_1 = node.nodeName
    # edit label through API
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    new_value_2 = random_name()
    node_labels[test_label] = new_value_2
    client.update(node, labels=node_labels)
    # cluster will go into updating state
    cluster = validate_cluster_state(client, cluster, True,
                                     intermediate_state="updating",
                                     nodes_not_in_active_state=[])
    node = client.reload(node)
    # Label should be added
    validate_label_set_on_node(client, node, test_label, new_value_2)
    # add a control plane node with original label
    # (fixed: the assignment was accidentally duplicated as a chained
    # `aws_nodes = aws_nodes = ...`)
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            1, random_test_name(HOST_NAME))
    for node in aws_nodes:
        aws_nodes_list.append(node)
    aws_node = aws_nodes[0]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["controlplane"],
                                                      aws_node)
    docker_run_cmd = \
        docker_run_cmd + " --label " + test_label + "=" + label_value
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 2)
    cluster = validate_cluster_state(client, cluster,
                                     intermediate_state="updating")
    nodes = client.list_node(clusterId=cluster.id).data
    # stash resources for cluster cleanup in the module fixture finalizer
    custom_cluster_add_edit["cluster"] = cluster
    custom_cluster_add_edit["aws_node"] = aws_nodes_list
    for node in nodes:
        if node.nodeName != node_name_1:
            validate_label_set_on_node(client, node, test_label, label_value)
        else:
            validate_label_set_on_node(client, node, test_label, new_value_2)
def test_node_label_node_template_add():
    """
    This test validates label added through node template,
    add label on node template, and validates the label
    is available on the scaled up node
    :return: None
    """
    client = cluster_detail["client"]
    cluster = cluster_node_template["cluster"]
    create_kubeconfig(cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    # get existing nodes info; the snapshot is used below to tell
    # pre-existing nodes apart from the newly scaled-up one
    existing_labels = {}
    for node in nodes:
        existing_labels[node.nodeName] = {}
        existing_labels[node.nodeName] = node.labels.data_dict()
    test_label = random_name()
    label_value = random_name()
    # create a node template with a label
    node_template_new, do_cloud_credential = \
        create_node_template_label(client, test_label, label_value)
    # Add a node in cluster
    cluster, node_pools = add_node_cluster(node_template_new, cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    # validate labels on nodes
    for node in nodes:
        if node.nodeName not in existing_labels.keys():
            # check if label is set on node
            validate_label_set_on_node(client, node, test_label, label_value)
        else:
            # check if the labels on the existing nodes are intact
            assert existing_labels[node.nodeName] == node.labels.data_dict(), \
                "Labels on existing nodes have changed"
@pytest.mark.run(after='test_node_label_node_template_add')
def test_node_label_node_template_edit():
    """
    This test validates label added through node template,
    edit label on node template, and validates new label
    is available on the scaled up node
    :return: None
    """
    client = cluster_detail["client"]
    cluster = cluster_node_template["cluster"]
    node_template = cluster_node_template["node_template"]
    do_cloud_credential = cluster_node_template["do_cloud_credential"]
    test_label = cluster_node_template["test_label"]
    create_kubeconfig(cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    # snapshot current node labels to distinguish old nodes from the new one
    existing_labels = {}
    for node in nodes:
        existing_labels[node.nodeName] = {}
        existing_labels[node.nodeName] = node.labels.data_dict()
    template_label = node_template.labels.data_dict()
    assert test_label in template_label, \
        "Label is NOT available on the node template"
    new_value = random_name()
    template_label[test_label] = new_value
    # updating a node template requires re-sending credential and config
    node_template_new = client.update(node_template, labels=template_label,
                                      cloudCredentialId=do_cloud_credential.id,
                                      digitaloceanConfig=
                                      {"region": "nyc3",
                                       "size": "2gb",
                                       "image": "ubuntu-16-04-x64"})
    node_template_new = client.wait_success(node_template_new)
    assert test_label in node_template_new["labels"], \
        "Label is not set on node template"
    assert node_template_new["labels"][test_label] == new_value
    # Add a node in cluster
    cluster, node_pools = add_node_cluster(node_template_new, cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    """check original label on the first node,
    and the new label on the added node"""
    # validate labels on nodes
    for node in nodes:
        if node.nodeName not in existing_labels.keys():
            # check if label is set on node
            validate_label_set_on_node(client, node, test_label, new_value)
        else:
            # check if the labels on the existing nodes are intact
            assert existing_labels[node.nodeName] == node.labels.data_dict(), \
                "Labels on existing nodes have changed"
@pytest.mark.run(after='test_node_label_node_template_edit')
def test_node_label_node_template_delete():
    """
    This test validates label added through node template,
    delete label on node template, and validates the label
    is NOT available on the scaled up node
    :return: None
    """
    client = cluster_detail["client"]
    cluster = cluster_node_template["cluster"]
    node_template = cluster_node_template["node_template"]
    do_cloud_credential = cluster_node_template["do_cloud_credential"]
    test_label = cluster_node_template["test_label"]
    create_kubeconfig(cluster_node_template["cluster"])
    nodes = client.list_node(clusterId=cluster.id).data
    # snapshot current node labels to distinguish old nodes from the new one
    existing_labels = {}
    for node in nodes:
        existing_labels[node.nodeName] = {}
        existing_labels[node.nodeName] = node.labels.data_dict()
    # delete label in node template
    template_label = node_template.labels.data_dict()
    del template_label[test_label]
    # update node template (credential and config must be re-sent)
    node_template_new = client.update(node_template, labels=template_label,
                                      cloudCredentialId=do_cloud_credential.id,
                                      digitaloceanConfig=
                                      {"region": "nyc3",
                                       "size": "2gb",
                                       "image": "ubuntu-16-04-x64"})
    node_template_new = client.wait_success(node_template_new)
    assert test_label not in node_template_new["labels"], \
        "Label is available on the node template"
    # Add a node in cluster with new node template
    cluster, node_pools = add_node_cluster(node_template_new, cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    # validate labels on nodes
    for node in nodes:
        if node.nodeName not in existing_labels.keys():
            node_labels = node.labels.data_dict()
            assert test_label not in node_labels, \
                "Label is NOT deleted on the node"
        else:
            # check if the labels on the existing nodes are intact
            assert existing_labels[node.nodeName] == node.labels.data_dict(), \
                "Labels on existing nodes have changed"
def test_node_label_node_template_edit_api():
    """
    This test validates edit of label via API
    which is added through node template
    :return: None
    """
    test_label = random_name()
    label_value = random_name()
    # provision a fresh cluster whose node template carries the label
    cluster, node_pools, node_template, do_cloud_credential = \
        create_cluster_node_template_label(test_label, label_value)
    client = get_user_client()
    # stash resources so the module fixture finalizer can delete them
    cluster_node_template_2["cluster"].append(cluster)
    cluster_node_template_2["node_template"].append(node_template)
    create_kubeconfig(cluster)
    node = client.list_node(clusterId=cluster.id).data
    node_id = node[0].id
    node = client.by_id_node(node_id)
    # Edit label on node via API
    node_labels = node.labels.data_dict()
    assert node_labels[test_label] == label_value
    # edit label through API
    new_value = random_name()
    node_labels[test_label] = new_value
    client.update(node, labels=node_labels)
    node = client.reload(node)
    time.sleep(2)  # give the label update a moment to propagate
    # Label should be added
    wait_for_condition(client, node, check_label_added(test_label), None, 10)
    validate_label_set_on_node(client, node, test_label, new_value)
def test_node_label_node_template_delete_api():
    """
    This test validates delete of label via API
    which is added through node template
    :return: None
    Expected failure because of issue -
    https://github.com/rancher/rancher/issues/26604
    """
    test_label = random_name()
    label_value = random_name()
    # provision a fresh cluster whose node template carries the label
    cluster, node_pools, node_template, do_cloud_credential = \
        create_cluster_node_template_label(test_label, label_value)
    client = get_user_client()
    # stash resources so the module fixture finalizer can delete them
    cluster_node_template_2["cluster"].append(cluster)
    cluster_node_template_2["node_template"].append(node_template)
    create_kubeconfig(cluster)
    node = client.list_node(clusterId=cluster.id).data
    node_id = node[0].id
    node = client.by_id_node(node_id)
    node_labels = node.labels.data_dict()
    assert node_labels[test_label] == label_value
    # delete label
    del node_labels[test_label]
    client.update(node, labels=node_labels)
    # cluster will go into updating state
    cluster = validate_cluster_state(client, cluster, True,
                                     intermediate_state="updating",
                                     nodes_not_in_active_state=[])
    node = client.reload(node)
    # label should be deleted
    validate_label_deleted_on_node(client, node, test_label)
def test_node_label_custom_add():
    """
    This test validates the label on a custom node
    added through the registration command
    :return:
    """
    test_label = random_name()
    label_value = random_name()
    cluster_custom["test_label"] = test_label
    cluster_custom["label_value"] = label_value
    client = cluster_detail["client"]
    node_roles = [["worker", "controlplane", "etcd"]]
    # create the shared custom cluster once; subsequent runs reuse it
    if cluster_custom["cluster"] is None:
        cluster_custom["cluster"], aws_nodes = \
            create_custom_node_label(node_roles, test_label, label_value, True)
        cluster = cluster_custom["cluster"]
        cluster_custom["aws_node"] = aws_nodes
    else:
        cluster = cluster_custom["cluster"]
    create_kubeconfig(cluster_custom["cluster"])
    nodes = client.list_node(clusterId=cluster.id).data
    node = nodes[0]
    validate_label_set_on_node(client, node, test_label, label_value)
@pytest.mark.run(after='test_node_label_custom_add')
def test_node_label_custom_edit():
    """
    This test validates edit on the label on the node -
    added through custom registration command
    :return: None
    """
    create_kubeconfig(cluster_custom["cluster"])
    client = cluster_detail["client"]
    cluster = cluster_custom["cluster"]
    test_label = cluster_custom["test_label"]
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) > 0
    node_id = nodes[0].id
    node = client.by_id_node(node_id)
    # edit label through API
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    new_value = random_name()
    node_labels[test_label] = new_value
    client.update(node, labels=node_labels)
    # cluster will go into updating state
    cluster = validate_cluster_state(client, cluster, True,
                                     intermediate_state="updating",
                                     nodes_not_in_active_state=[])
    node = client.reload(node)
    validate_label_set_on_node(client, node, test_label, new_value)
    # record the new value for the follow-up tests in this chain
    cluster_custom["label_value"] = new_value
@pytest.mark.run(after='test_node_label_custom_edit')
def test_node_label_custom_add_additional():
    """
    This test validates addition of an additional label on a custom node
    via the API, and that the pre-existing label stays intact.
    :return: None
    """
    create_kubeconfig(cluster_custom["cluster"])
    client = cluster_detail["client"]
    cluster = cluster_custom["cluster"]
    test_label = cluster_custom["test_label"]
    label_value = cluster_custom["label_value"]
    new_label = random_name()
    label_value_new = random_name()
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) > 0
    node_id = nodes[0].id
    node = client.by_id_node(node_id)
    node_labels = node.labels.data_dict()
    node_labels[new_label] = label_value_new
    client.update(node, labels=node_labels)
    time.sleep(2)
    # Label should be added.
    # Fixed: wait for the label that was just added (new_label); waiting on
    # the pre-existing test_label returned immediately and made the check racy.
    wait_for_condition(client, node, check_label_added(new_label), None, 10)
    validate_label_set_on_node(client, node, new_label, label_value_new)
    # the pre-existing label must remain intact
    validate_label_set_on_node(client, node, test_label, label_value)
    # remove label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[new_label]
    client.update(node, labels=node_labels)
@pytest.mark.run(after='test_node_label_custom_add_additional')
def test_node_label_custom_delete():
    """
    This test deletes the label on the node via API
    :return: None
    Expected failure because of issue -
    https://github.com/rancher/rancher/issues/26604
    """
    create_kubeconfig(cluster_custom["cluster"])
    client = cluster_detail["client"]
    cluster = cluster_custom["cluster"]
    test_label = cluster_custom["test_label"]
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) > 0
    node_id = nodes[0].id
    node = client.by_id_node(node_id)
    # remove label
    node = client.reload(node)
    node_labels = node.labels.data_dict()
    del node_labels[test_label]
    client.update(node, labels=node_labels)
    # cluster will go into updating state
    cluster = validate_cluster_state(client, cluster, True,
                                     intermediate_state="updating",
                                     nodes_not_in_active_state=[])
    node = client.reload(node)
    # label should be deleted
    validate_label_deleted_on_node(client, node, test_label)
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_label_add(role):
    """RBAC: only the cluster owner may add a node label via the API;
    every other role must receive a 403 Forbidden from the server.
    """
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_labels = node.labels.data_dict()
    # get user token and client
    token = rbac_get_user_token_by_role(role)
    print("token: ", token)
    user_client = get_client_for_token(token)
    node_labels[test_label] = label_value
    if role == CLUSTER_OWNER:
        user_client.update(node, labels=node_labels)
        time.sleep(2)  # give the label update a moment to propagate
        # Label should be added
        wait_for_condition(user_client, node,
                           check_label_added(test_label), None, 10)
        validate_label_set_on_node(user_client, node, test_label, label_value)
    else:
        # non-owner roles must be rejected by the API
        with pytest.raises(ApiError) as e:
            user_client.update(node, labels=node_labels)
        assert e.value.error.status == 403
        assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_label_add_kubectl(role):
    """RBAC: only the cluster owner may add a node label via kubectl;
    cluster members get a patch-forbidden error, other roles cannot even
    get the node resource.
    """
    test_label = random_name()
    label_value = random_name()
    # get node details
    client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    node_name = node.nodeName
    # get user token and client
    token = rbac_get_user_token_by_role(role)
    user_client = get_client_for_token(token)
    print(cluster_detail["cluster"]["id"])
    print(cluster_detail["cluster"])
    cluster = user_client.list_cluster(id=cluster_detail["cluster"]["id"]).data
    print(cluster)
    # kubeconfig is generated with the role user's credentials, so kubectl
    # below runs with that role's permissions
    create_kubeconfig(cluster[0])
    # add label on node
    command = "label nodes " + node_name + " " + test_label + "=" + label_value
    if role == CLUSTER_OWNER:
        execute_kubectl_cmd(command, False)
        time.sleep(2)  # give the label update a moment to propagate
        # Label should be added
        wait_for_condition(user_client, node,
                           check_label_added(test_label), None, 10)
        validate_label_set_on_node(user_client, node, test_label, label_value)
    elif role == CLUSTER_MEMBER:
        result = execute_kubectl_cmd(command, False, stderr=True)
        result = result.decode('ascii')
        assert "cannot patch resource \"nodes\"" in result
        assert "forbidden" in result
    else:
        result = execute_kubectl_cmd(command, False, stderr=True)
        result = result.decode('ascii')
        assert "cannot get resource \"nodes\"" in result
        assert "forbidden" in result
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
    """Module-scoped autouse fixture.

    Resolves the shared user client/cluster, provisions the cluster used
    by the node-template test cases, and registers a finalizer that tears
    down every cluster, AWS node and node template the tests created.

    Fixed: `autouse` was passed as the string "True" (worked only by
    truthiness) instead of the boolean pytest expects.
    """
    cluster_detail["client"], cluster_detail["cluster"] = \
        get_user_client_and_cluster()
    test_label = random_name()
    label_value = random_name()
    # Create a cluster for node template related test cases
    cluster_node_template["cluster"], \
        node_pools, \
        cluster_node_template["node_template"], \
        cluster_node_template["do_cloud_credential"] = \
        create_cluster_node_template_label(test_label, label_value)
    cluster_node_template["node_pools"] = node_pools[0]
    cluster_node_template["test_label"] = test_label
    cluster_node_template["label_value"] = label_value

    def fin():
        # tear down everything the tests stashed in the module-level dicts
        client = get_user_client()
        cluster = cluster_node_template["cluster"]
        if cluster is not None:
            node_pools_list = client.list_nodePool(clusterId=cluster.id).data
            # delete the cluster first, then its (now unused) node templates
            client.delete(cluster_node_template["cluster"])
            wait_for_cluster_delete(client, cluster["name"])
            time.sleep(10)
            # collect unique node templates referenced by the pools
            unique_node_pool = {}
            for node_pool in node_pools_list:
                if node_pool.nodeTemplateId not in unique_node_pool:
                    unique_node_pool[node_pool.nodeTemplateId] = \
                        client.list_node_template(
                            id=node_pool.nodeTemplateId).data[0]
            print("unique_node_pool: ", unique_node_pool)
            for template in unique_node_pool.values():
                client.delete(template)
        if cluster_custom["cluster"] is not None:
            client.delete(cluster_custom["cluster"])
        if cluster_custom["aws_node"] is not None:
            delete_node(cluster_custom["aws_node"])
        if custom_cluster_add_edit["cluster"] is not None:
            client.delete(custom_cluster_add_edit["cluster"])
        if custom_cluster_add_edit["aws_node"] is not None:
            delete_node(custom_cluster_add_edit["aws_node"])
        if len(cluster_node_template_2["cluster"]) != 0:
            for cluster in cluster_node_template_2["cluster"]:
                client.delete(cluster)
                wait_for_cluster_delete(client, cluster.name)
                time.sleep(10)
            for node_template in cluster_node_template_2["node_template"]:
                client.reload(node_template)
                client.delete(node_template)
    request.addfinalizer(fin)
def check_cluster_deleted(client):
    """Build a condition callable for wait_for_condition: it reloads the
    given resource through *client* and reports True once the reloaded
    listing's "data" collection is empty (i.e. the cluster is gone).
    """
    def _find_condition(resource):
        refreshed = client.reload(resource)
        return len(refreshed["data"]) == 0
    return _find_condition
def check_label_added(test_label):
    """Build a condition callable for wait_for_condition: it reports True
    once *test_label* is present in the resource's label dict.
    """
    def _find_condition(resource):
        return test_label in resource.labels.data_dict()
    return _find_condition
def check_label_removed(test_label):
    """Build a condition callable for wait_for_condition: it reports True
    once *test_label* is absent from the resource's label dict.
    """
    def _find_condition(resource):
        return test_label not in resource.labels.data_dict()
    return _find_condition
def validate_label_set_on_node(client, node, test_label, label_value):
    """Assert that *test_label* is set to *label_value* on *node*, checking
    through both the Rancher API and kubectl.

    :param client: user client
    :param node: node on which user has to validate if the label is added
    :param test_label: Label to be validated on the node
    :param label_value: label value to be checked
    :return: None
    """
    print("label_value: ", label_value)
    # API-side check: reload the node and inspect its label dict
    refreshed = client.reload(node)
    assert refreshed.labels.data_dict()[test_label] == label_value
    # kubectl-side check: fetch the node object and inspect metadata.labels
    node_detail = execute_kubectl_cmd(" get nodes " + refreshed.nodeName)
    kubectl_labels = node_detail["metadata"]["labels"]
    print(kubectl_labels)
    assert test_label in kubectl_labels, \
        "Label is not set in kubectl"
    assert kubectl_labels[test_label] == label_value
def validate_label_deleted_on_node(client, node, test_label):
    """Assert that *test_label* is absent from *node*, checking through
    both the Rancher API and kubectl.

    :param client: user client
    :param node: node on which user has to validate if the label is deleted
    :param test_label: Label to be validated on the node
    :return: None
    """
    # API-side check: reload the node and inspect its label dict
    refreshed = client.reload(node)
    assert test_label not in refreshed.labels.data_dict()
    # kubectl-side check: fetch the node object and inspect metadata.labels
    command = " get nodes " + refreshed.nodeName
    print(command)
    node_detail = execute_kubectl_cmd(command)
    print(node_detail["metadata"]["labels"])
    assert test_label not in node_detail["metadata"]["labels"]
def add_node_cluster(node_template, cluster):
    """
    This method adds a node pool to a given cluster
    :param node_template: node pool uses this to create a node
    :param cluster: node pool is added to this cluster
    :return: cluster, node_pools
    """
    client = get_user_client()
    nodes = []
    node_name = random_node_name()
    # worker-only pool of size 1; clusterId is filled in below
    node = {"hostnamePrefix": node_name,
            "nodeTemplateId": node_template.id,
            "controlPlane": False,
            "etcd": False,
            "worker": True,
            "quantity": 1,
            "clusterId": None}
    nodes.append(node)
    node_pools = []
    for node in nodes:
        node["clusterId"] = cluster.id
        success = False
        start = time.time()
        # retry pool creation for up to ~10s; an ApiError here is presumably
        # the cluster-owner role not having propagated yet -- TODO confirm
        while not success:
            if time.time() - start > 10:
                raise AssertionError(
                    "Timed out waiting for cluster owner global Roles")
            try:
                time.sleep(1)
                node_pool = client.create_node_pool(**node)
                success = True
            except ApiError:
                success = False
        node_pool = client.wait_success(node_pool)
        node_pools.append(node_pool)
    # wait for the cluster to settle back into active without requiring an
    # intermediate state
    cluster = validate_cluster_state(client, cluster,
                                     check_intermediate_state=False)
    return cluster, node_pools
def create_cluster_node_template_label(test_label, label_value):
    """
    This method create a node template with the label key and value provided.
    Creates a cluster with nodepool, which uses the above node template.
    Cluster spec: 1 node all roles
    :param test_label: label to add in the node template
    :param label_value: label value
    :return: cluster, node_pools, node_template, do_cloud_credential
    """
    client = get_user_client()
    node_template, do_cloud_credential = \
        create_node_template_label(client, test_label, label_value)
    assert test_label in node_template["labels"], \
        "Label is not set on node template"
    assert node_template["labels"][test_label] == label_value
    nodes = []
    node_name = random_node_name()
    # single all-role pool; clusterId is filled in below
    node = {"hostnamePrefix": node_name,
            "nodeTemplateId": node_template.id,
            "controlPlane": True,
            "etcd": True,
            "worker": True,
            "quantity": 1,
            "clusterId": None}
    nodes.append(node)
    cluster = client.create_cluster(
        name=random_name(),
        rancherKubernetesEngineConfig=rke_config)
    node_pools = []
    for node in nodes:
        node["clusterId"] = cluster.id
        success = False
        start = time.time()
        # retry pool creation for up to ~10s; an ApiError here is presumably
        # the cluster-owner role not having propagated yet -- TODO confirm
        while not success:
            if time.time() - start > 10:
                raise AssertionError(
                    "Timed out waiting for cluster owner global Roles")
            try:
                time.sleep(1)
                node_pool = client.create_node_pool(**node)
                success = True
            except ApiError:
                success = False
        node_pool = client.wait_success(node_pool)
        node_pools.append(node_pool)
    cluster = validate_cluster_state(client, cluster)
    return cluster, node_pools, node_template, do_cloud_credential
def create_custom_node_label(node_roles, test_label,
                             label_value, random_cluster_name=False):
    """
    This method creates nodes from AWS and adds the label key and value to
    the register command and deploys a custom cluster.
    :param node_roles: list of node roles for the cluster
    :param test_label: label to add in the docker register command
    :param label_value: label value to add in the docker register command
    :param random_cluster_name: when True use a random cluster name,
        otherwise use the configured cluster name
    :return: cluster and aws nodes created
    """
    # one AWS host per entry in node_roles
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles), random_test_name(HOST_NAME))
    client = get_user_client()
    cluster_name = random_name() if random_cluster_name \
        else evaluate_clustername()
    cluster = client.create_cluster(name=cluster_name,
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    for aws_node in aws_nodes:
        # register each host with its role set and the extra --label flag
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        for nr in node_roles[i]:
            aws_node.roles.append(nr)
        docker_run_cmd = docker_run_cmd + " --label " + \
                         test_label + "=" + label_value
        aws_node.execute_command(docker_run_cmd)
        i += 1
    cluster = validate_cluster_state(client, cluster)
    return cluster, aws_nodes
def create_node_template_label(client, test_label, label_value):
    """Create a DigitalOcean node template carrying the given label.

    :param client: user client
    :param test_label: label key to add in the node template
    :param label_value: value of the label to add in the node template
    :return: the ready node template and the DO cloud credential
    """
    # credential first: the template must reference it
    do_cloud_credential = client.create_cloud_credential(
        digitaloceancredentialConfig={"accessToken": DO_ACCESSKEY}
    )
    template = client.create_node_template(
        digitaloceanConfig={"region": "nyc3",
                            "size": "2gb",
                            "image": "ubuntu-16-04-x64"},
        name=random_name(),
        driver="digitalocean",
        cloudCredentialId=do_cloud_credential.id,
        useInternalIpAddress=True,
        labels={"cattle.io/creator": "norman", test_label: label_value})
    template = client.wait_success(template)
    return template, do_cloud_credential
| |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import sys
from command import InteractiveCommand
from editor import Editor
from error import HookError, UploadError
from project import RepoHook
# Branches carrying more commits than this are considered unusually large
# and trigger an extra confirmation prompt before upload.
UNUSUAL_COMMIT_THRESHOLD = 5
def _ConfirmManyUploads(multiple_branches=False):
  """Warn that an unusually high number of commits is about to be uploaded
  and ask the user to confirm; returns True only if they type 'yes'.
  (Python 2: print statements and raw_input.)
  """
  if multiple_branches:
    print "ATTENTION: One or more branches has an unusually high number of commits."
  else:
    print "ATTENTION: You are uploading an unusually high number of commits."
  print "YOU PROBABLY DO NOT MEAN TO DO THIS. (Did you rebase across branches?)"
  answer = raw_input("If you are sure you intend to do this, type 'yes': ").strip()
  return answer == "yes"
def _die(fmt, *args):
  """Print 'error: <fmt % args>' to stderr and exit the process with status 1."""
  msg = fmt % args
  print >>sys.stderr, 'error: %s' % msg
  sys.exit(1)
def _SplitEmails(values):
result = []
for str in values:
result.extend([s.strip() for s in str.split(',')])
return result
class Upload(InteractiveCommand):
common = True
helpSummary = "Upload changes for code review"
helpUsage="""
%prog [--re --cc] [<project>]...
"""
helpDescription = """
The '%prog' command is used to send changes to the Gerrit Code
Review system. It searches for topic branches in local projects
that have not yet been published for review. If multiple topic
branches are found, '%prog' opens an editor to allow the user to
select which branches to upload.
'%prog' searches for uploadable changes in all projects listed at
the command line. Projects can be specified either by name, or by
a relative or absolute path to the project's local directory. If no
projects are specified, '%prog' will search for uploadable changes
in all projects listed in the manifest.
If the --reviewers or --cc options are passed, those emails are
added to the respective list of users, and emails are sent to any
new users. Users passed as --reviewers must already be registered
with the code review system, or the upload will fail.
Configuration
-------------
review.URL.autoupload:
To disable the "Upload ... (y/N)?" prompt, you can set a per-project
or global Git configuration option. If review.URL.autoupload is set
to "true" then repo will assume you always answer "y" at the prompt,
and will not prompt you further. If it is set to "false" then repo
will assume you always answer "n", and will abort.
review.URL.autocopy:
To automatically copy a user or mailing list to all uploaded reviews,
you can set a per-project or global Git option to do so. Specifically,
review.URL.autocopy can be set to a comma separated list of reviewers
who you always want copied on all uploads with a non-empty --re
argument.
review.URL.username:
Override the username used to connect to Gerrit Code Review.
By default the local part of the email address is used.
The URL must match the review URL listed in the manifest XML file,
or in the .git/config within the project. For example:
[remote "origin"]
url = git://git.example.com/project.git
review = http://review.example.com/
[review "http://review.example.com/"]
autoupload = true
autocopy = johndoe@company.com,my-team-alias@company.com
References
----------
Gerrit Code Review: http://code.google.com/p/gerrit/
"""
  def _Options(self, p):
    """Register the upload command's optparse options on parser *p*."""
    p.add_option('-t',
                 dest='auto_topic', action='store_true',
                 help='Send local branch name to Gerrit Code Review')
    p.add_option('--re', '--reviewers',
                 type='string', action='append', dest='reviewers',
                 help='Request reviews from these people.')
    p.add_option('--cc',
                 type='string', action='append', dest='cc',
                 help='Also send email to these email addresses.')
    p.add_option('--br',
                 type='string', action='store', dest='branch',
                 help='Branch to upload.')
    p.add_option('--cbr', '--current-branch',
                 dest='current_branch', action='store_true',
                 help='Upload current git branch.')
    # Options relating to upload hook.  Note that verify and no-verify are NOT
    # opposites of each other, which is why they store to different locations.
    # We are using them to match 'git commit' syntax.
    #
    # Combinations:
    # - no-verify=False, verify=False (DEFAULT):
    #   If stdout is a tty, can prompt about running upload hooks if needed.
    #   If user denies running hooks, the upload is cancelled.  If stdout is
    #   not a tty and we would need to prompt about upload hooks, upload is
    #   cancelled.
    # - no-verify=False, verify=True:
    #   Always run upload hooks with no prompt.
    # - no-verify=True, verify=False:
    #   Never run upload hooks, but upload anyway (AKA bypass hooks).
    # - no-verify=True, verify=True:
    #   Invalid
    p.add_option('--no-verify',
                 dest='bypass_hooks', action='store_true',
                 help='Do not run the upload hook.')
    p.add_option('--verify',
                 dest='allow_all_hooks', action='store_true',
                 help='Run the upload hook without prompting.')
  def _SingleBranch(self, opt, branch, people):
    """Upload a single pending branch, honoring review.URL.autoupload.

    autoupload=false aborts; autoupload unset prompts interactively,
    listing the commits; autoupload=true uploads without prompting.
    Uploads above UNUSUAL_COMMIT_THRESHOLD commits need extra confirmation.
    """
    project = branch.project
    name = branch.name
    remote = project.GetBranch(name).remote
    key = 'review.%s.autoupload' % remote.review
    answer = project.config.GetBoolean(key)
    # explicit autoupload=false blocks the upload outright
    if answer is False:
      _die("upload blocked by %s = false" % key)
    # unset -> ask interactively, showing the commits to be uploaded
    if answer is None:
      date = branch.date
      list = branch.commits
      print 'Upload project %s/ to remote branch %s:' % (project.relpath, project.revisionExpr)
      print ' branch %s (%2d commit%s, %s):' % (
          name,
          len(list),
          len(list) != 1 and 's' or '',
          date)
      for commit in list:
        print ' %s' % commit
      sys.stdout.write('to %s (y/N)? ' % remote.review)
      answer = sys.stdin.readline().strip()
      answer = answer in ('y', 'Y', 'yes', '1', 'true', 't')
    if answer:
      # large uploads need one more explicit confirmation
      if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
        answer = _ConfirmManyUploads()
    if answer:
      self._UploadAndReport(opt, [branch], people)
    else:
      _die("upload aborted by user")
    def _MultipleBranches(self, opt, pending, people):
        """Let the user pick branches to upload by editing a comment script.

        Builds a fully commented-out listing of all uploadable branches,
        opens it in the user's editor, and uploads every branch line the
        user uncommented.  Dies if nothing was uncommented or if a line
        references an unknown project/branch.
        """
        projects = {}
        branches = {}
        # Build the editable script: every project and branch starts
        # commented out; the user uncomments what should be uploaded.
        script = []
        script.append('# Uncomment the branches to upload:')
        for project, avail in pending:
            script.append('#')
            script.append('# project %s/:' % project.relpath)
            b = {}
            for branch in avail:
                name = branch.name
                date = branch.date
                list = branch.commits  # NOTE(review): shadows the builtin ``list``
                if b:
                    # blank separator between branches of the same project
                    script.append('#')
                script.append('# branch %s (%2d commit%s, %s) to remote branch %s:' % (
                    name,
                    len(list),
                    len(list) != 1 and 's' or '',
                    date,
                    project.revisionExpr))
                for commit in list:
                    script.append('# %s' % commit)
                b[name] = branch
            projects[project.relpath] = project
            branches[project.name] = b
        script.append('')
        # Python 2: encode any unicode entries so EditString gets bytes.
        script = [ x.encode('utf-8')
                   if issubclass(type(x), unicode)
                   else x
                   for x in script ]
        script = Editor.EditString("\n".join(script)).split("\n")
        # A project line may stay commented ('#?'); a branch line counts
        # only once its leading '#' was removed.
        project_re = re.compile(r'^#?\s*project\s*([^\s]+)/:$')
        branch_re = re.compile(r'^\s*branch\s*([^\s(]+)\s*\(.*')
        project = None
        todo = []
        for line in script:
            m = project_re.match(line)
            if m:
                name = m.group(1)
                project = projects.get(name)
                if not project:
                    _die('project %s not available for upload', name)
                continue
            m = branch_re.match(line)
            if m:
                name = m.group(1)
                if not project:
                    # branch line seen before any project line
                    _die('project for branch %s not in script', name)
                branch = branches[project.name].get(name)
                if not branch:
                    _die('branch %s not in %s', name, project.relpath)
                todo.append(branch)
        if not todo:
            _die("nothing uncommented for upload")
        # Confirm once if any selected branch has unusually many commits.
        many_commits = False
        for branch in todo:
            if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
                many_commits = True
                break
        if many_commits:
            if not _ConfirmManyUploads(multiple_branches=True):
                _die("upload aborted by user")
        self._UploadAndReport(opt, todo, people)
def _AppendAutoCcList(self, branch, people):
"""
Appends the list of users in the CC list in the git project's config if a
non-empty reviewer list was found.
"""
name = branch.name
project = branch.project
key = 'review.%s.autocopy' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if not raw_list is None and len(people[0]) > 0:
people[1].extend([entry.strip() for entry in raw_list.split(',')])
def _FindGerritChange(self, branch):
last_pub = branch.project.WasPublished(branch.name)
if last_pub is None:
return ""
refs = branch.GetPublishedRefs()
try:
# refs/changes/XYZ/N --> XYZ
return refs.get(last_pub).split('/')[-2]
except:
return ""
    def _UploadAndReport(self, opt, todo, original_people):
        """Upload every branch in todo and print a per-branch status report.

        Each branch gets a deep copy of (reviewers, cc) so the autocopy
        list of one project cannot leak into another.  Exits with status 1
        if any branch failed to upload.
        """
        have_errors = False
        for branch in todo:
            try:
                people = copy.deepcopy(original_people)
                self._AppendAutoCcList(branch, people)
                # Check if there are local changes that may have been forgotten
                if branch.project.HasChanges():
                    key = 'review.%s.autoupload' % branch.project.remote.review
                    answer = branch.project.config.GetBoolean(key)
                    # if they want to auto upload, let's not ask because it could be automated
                    if answer is None:
                        sys.stdout.write('Uncommitted changes in ' + branch.project.name + ' (did you forget to amend?). Continue uploading? (y/N) ')
                        a = sys.stdin.readline().strip().lower()
                        if a not in ('y', 'yes', 't', 'true', 'on'):
                            print >>sys.stderr, "skipping upload"
                            branch.uploaded = False
                            branch.error = 'User aborted'
                            continue
                branch.UploadForReview(people, auto_topic=opt.auto_topic)
                branch.uploaded = True
            except UploadError, e:
                # record the failure but keep uploading remaining branches
                branch.error = e
                branch.uploaded = False
                have_errors = True
        print >>sys.stderr, ''
        print >>sys.stderr, '----------------------------------------------------------------------'
        if have_errors:
            for branch in todo:
                if not branch.uploaded:
                    # short errors fit on the same line, long ones wrap
                    if len(str(branch.error)) <= 30:
                        fmt = ' (%s)'
                    else:
                        fmt = '\n (%s)'
                    print >>sys.stderr, ('[FAILED] %-15s %-15s' + fmt) % (
                        branch.project.relpath + '/', \
                        branch.name, \
                        str(branch.error))
            print >>sys.stderr, ''
        for branch in todo:
            if branch.uploaded:
                print >>sys.stderr, '[OK ] %-15s %s' % (
                    branch.project.relpath + '/',
                    branch.name)
        if have_errors:
            sys.exit(1)
    def Execute(self, opt, args):
        """Entry point: collect uploadable branches, run hooks, upload.

        Dispatches to _SingleBranch when exactly one branch is pending,
        otherwise to the interactive _MultipleBranches flow.
        """
        project_list = self.GetProjects(args)
        pending = []
        reviewers = []
        cc = []
        branch = None
        if opt.branch:
            branch = opt.branch
        # Collect (project, branches) pairs that have something to upload.
        for project in project_list:
            if opt.current_branch:
                cbr = project.CurrentBranch
                avail = [project.GetUploadableBranch(cbr)] if cbr else None
            else:
                avail = project.GetUploadableBranches(branch)
            if avail:
                pending.append((project, avail))
        # Run the pre-upload hook unless --no-verify bypassed it.
        if pending and (not opt.bypass_hooks):
            hook = RepoHook('pre-upload', self.manifest.repo_hooks_project,
                            self.manifest.topdir, abort_if_user_denies=True)
            pending_proj_names = [project.name for (project, avail) in pending]
            try:
                hook.Run(opt.allow_all_hooks, project_list=pending_proj_names)
            except HookError, e:
                print >>sys.stderr, "ERROR: %s" % str(e)
                return
        if opt.reviewers:
            reviewers = _SplitEmails(opt.reviewers)
        if opt.cc:
            cc = _SplitEmails(opt.cc)
        people = (reviewers,cc)
        if not pending:
            print >>sys.stdout, "no branches ready for upload"
        elif len(pending) == 1 and len(pending[0][1]) == 1:
            self._SingleBranch(opt, pending[0][1][0], people)
        else:
            self._MultipleBranches(opt, pending, people)
| |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import dateutil.parser
import re
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import GoogleCredentials
from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
import packer
import service
import validate
IMAGE_CACHE = {}
def google_get_images_json_key(project, key_json):
    """List images in a GCP project using service-account JSON credentials.

    Returns a list of (image_name, creation_datetime) pairs; the timestamp
    string from the API is parsed with dateutil.
    """
    scopes = ['https://www.googleapis.com/auth/compute']
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        key_json, scopes=scopes)
    compute = build('compute', 'v1', credentials=credentials)
    response = compute.images().list(project=project).execute()
    return [
        (item['name'], dateutil.parser.parse(item['creationTimestamp']))
        for item in response.get('items', [])
    ]
class GooglePackerBuildArtefact(packer.PackerBuildArtefact):
    """Packer build artefact describing a Google Compute Engine image."""

    def __init__(self, image, environment):
        # image: dict with cmds / machineType / source / zone
        # environment: dict with project / sshUser / serviceAccount
        super(GooglePackerBuildArtefact, self).__init__(image['cmds'])
        self.machineType = image['machineType']
        self.source = image['source']
        self.zone = image['zone']
        self.project = environment['project']
        self.sshUser = environment['sshUser']
        self.serviceAccount = environment['serviceAccount']

    def builderHashCode(self):
        # XOR of per-field string hashes; a change in any builder field
        # yields a different artefact hash (and hence image name).
        builder_hash = 0;
        builder_hash ^= packer.hash_string(self.machineType)
        builder_hash ^= packer.hash_string(self.source)
        builder_hash ^= packer.hash_string(self.zone)
        return builder_hash

    def builder(self):
        # Packer 'googlecompute' builder configuration for this image.
        return {
            'name': self.name(),
            'image_name': self.name(),
            'instance_name': self.name(),
            'type': 'googlecompute',
            'image_description': 'Image built by micromanage',
            'project_id': self.project,
            'account_file': json.dumps(self.serviceAccount),
            'machine_type': self.machineType,
            'source_image': self.source,
            'zone': self.zone,
            'ssh_username': self.sshUser,
        }

    def needsBuild(self):
        # A build is needed iff no image with this name exists yet;
        # image listings are memoized per project in IMAGE_CACHE.
        print 'Checking if image exists: %s/%s' % (self.project, self.name())
        if self.project in IMAGE_CACHE:
            existing_image_names = IMAGE_CACHE[self.project]
        else:
            existing_image_names = [img[0] for img in google_get_images_json_key(self.project, self.serviceAccount)]
            IMAGE_CACHE[self.project] = existing_image_names
        return self.name() not in existing_image_names

    def doBuild(self, dirpath):
        super(GooglePackerBuildArtefact, self).doBuild(dirpath)
        # Record the freshly built image so later needsBuild() calls see it.
        if self.project not in IMAGE_CACHE:
            IMAGE_CACHE[self.project] = []
        IMAGE_CACHE[self.project] += [self.name()]

    def postBuild(self):
        # GCE images need no post-build step.
        pass
class GoogleService(service.Service):
    """Micromanage service targeting Google Cloud (GCE resources via
    Terraform, machine images via Packer)."""

    def validateEnvironment(self, root, path):
        """Validate a Google environment block (project, region, sshUser,
        serviceAccount with its required key fields)."""
        fields = {'kind', 'project', 'region', 'sshUser', 'serviceAccount'}
        validate.obj_only(root, path, fields)
        validate.path_val(root, path + ['project'], 'string')
        validate.path_val(root, path + ['region'], 'string')
        validate.path_val(root, path + ['sshUser'], 'string')
        # presence/shape check; the returned object itself is not needed
        validate.path_val(root, path + ['serviceAccount'], 'object')
        validate.path_val(root, path + ['serviceAccount', 'client_email'], 'string')
        validate.path_val(root, path + ['serviceAccount', 'private_key'], 'string')
        validate.path_val(root, path + ['serviceAccount', 'type'], validate.is_value('service_account'), 'service_account')
        validate.path_val(root, path + ['serviceAccount', 'client_id'], 'string', '')
        validate.path_val(root, path + ['serviceAccount', 'private_key_id'], 'string', '')
        fields = {'client_email', 'private_key', 'type', 'client_id', 'private_key_id'}
        validate.obj_only(root, path + ['serviceAccount'], fields)

    def validateService(self, root, path):
        """Validate instance/disk infrastructure and any inline image configs."""
        super(GoogleService, self).validateService(root, path)
        infra_path = path + ['infrastructure']
        validate.path_val(root, infra_path, 'object', {})
        inst_path = infra_path + ['google_compute_instance']
        instances = validate.path_val(root, inst_path, 'object', {})
        disk_path = infra_path + ['google_compute_disk']
        disks = validate.path_val(root, disk_path, 'object', {})
        # Validate image configs
        for inst_name, inst in instances.iteritems():
            self.validateCmds(root, inst_path + [inst_name, 'cmds'])
            self.validateCmds(root, inst_path + [inst_name, 'bootCmds'])
            # Assume instances have a root disk.
            validate.path_val(root, inst_path + [inst_name, 'disk'], 'array')
            inst_disk_path = inst_path + [inst_name, 'disk', 0]
            disk = validate.path_val(root, inst_disk_path, 'object')
            image = disk.get('image')
            # an image given as an object is an inline Packer build config
            if isinstance(image, dict):
                self.validateImage(root, inst_disk_path + ['image'])
        for disk_name, disk in disks.iteritems():
            image = disk.get('image')
            if isinstance(image, dict):
                self.validateImage(root, disk_path + [disk_name, 'image'])

    def validateImage(self, root, path):
        """Validate an inline image config (machineType/source/zone)."""
        super(GoogleService, self).validateImage(root, path)
        validate.path_val(root, path + ['machineType'], 'string', 'n1-standard-1')
        validate.path_val(root, path + ['source'], 'string')
        validate.path_val(root, path + ['zone'], 'string')
        validate.obj_only(root, path, {'cmds', 'machineType', 'source', 'zone'})

    def compileProvider(self, environment_name, environment):
        """Emit the Terraform google provider block for one environment."""
        return {
            'environment.%s.tf' % environment_name: {
                'provider': {
                    'google': {
                        'alias': environment_name,
                        'credentials': json.dumps(environment['serviceAccount']),
                        'project': environment['project'],
                        'region' : environment['region'],
                    },
                },
            },
        }

    def getBuildArtefacts(self, environment, ctx, service):
        """Replace inline image configs with named Packer build artefacts.

        Returns (service_copy, artefacts) where every dict-valued 'image'
        has been swapped for the artefact's generated name.
        """
        service = copy.deepcopy(service)
        barts = {}  # Build artefacts.
        instances = service['infrastructure']['google_compute_instance']
        disks = service['infrastructure']['google_compute_disk']
        # Process image configs
        for inst_name, inst in instances.iteritems():
            image = inst['disk'][0].get('image')
            if isinstance(image, dict):
                bart = GooglePackerBuildArtefact(image, environment)
                barts[bart.name()] = bart
                inst['disk'][0]['image'] = bart.name()
        for disk_name, disk in disks.iteritems():
            image = disk.get('image')
            if isinstance(image, dict):
                bart = GooglePackerBuildArtefact(image, environment)
                barts[bart.name()] = bart
                disk['image'] = bart.name()
        return service, barts

    def compile(self, ctx, service_name, service, barts):
        """Compile the service to Terraform JSON, folding cmds/bootCmds
        into the instance startup script."""
        infra = service['infrastructure']

        # Add provider attributes
        for res_kind_name, res_kind_obj in infra.iteritems():
            for res_name, res in res_kind_obj.iteritems():
                res['provider'] = 'google.%s' % service['environment']

        # Process instance commands
        instances = infra.get('google_compute_instance', {})

        def curl_md(k):
            # shell command fetching an instance metadata attribute at boot
            md_pref = 'http://169.254.169.254/computeMetadata/v1/instance/attributes'
            return 'curl -s -H Metadata-Flavor:Google %s/%s' % (md_pref, k)

        for inst_name, inst in instances.iteritems():
            cmds = inst['cmds']
            boot_cmds = inst['bootCmds']
            metadata = inst['metadata']
            if 'startup-script' in metadata:
                # Move user startup script out of the way (but still run it at every boot).
                metadata['micromanage-user-startup-script'] = metadata['startup-script']
                metadata.pop('startup-script', None)
                # BUG FIX: was ``bootCmds += ...`` which raised NameError at
                # runtime -- the local variable is ``boot_cmds``.
                boot_cmds += ['%s | bash' % curl_md('micromanage-user-startup-script')]
            inst['metadata'] = metadata
            inst['metadata_startup_script'] = self.compileStartupScript(cmds, boot_cmds)
            inst.pop('cmds', None)
            inst.pop('bootCmds', None)

        return {
            'service.%s.tf' % self.fullName(ctx, service_name): {
                'resource': infra,
                'output': {
                    k: { 'value': v }
                    for k, v in service['outputs'].iteritems()
                }
            }
        }
| |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, The Horizomer Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
#
# Parse output files of HGT tools.
#
import click
import sys
from skbio import Sequence
# T-REX version 3.6
# RANGER-DTL-U version 1.0
# RIATA-HGT version 3.5.6
# JANE version 4
# each tuple consists of three strings, first string is the unique string to
# identify the line with HGT information, second and third strings are the
# bounds for the actual number of HGTs
hgt_parse_strs = {
    # RANGER-DTL-U: the count sits between 'Transfers: ' and ', Losses'
    'ranger-dtl': ('The minimum reconciliation cost is: ',
                   'Transfers: ',
                   ', Losses'),
    # T-REX: the count directly follows the marker, terminated by a space
    'trex': ('hgt : number of HGT(s) found = ',
             'hgt : number of HGT(s) found = ',
             ' '),
    # JANE 4: the count follows 'Host Switch: ', terminated by a space
    'jane4': ('Host Switch: ',
              'Host Switch: ',
              ' '),
    # RIATA-HGT: the count sits between 'There are ' and ' component(s)'
    'riata-hgt': ('There are ',
                  'There are ',
                  ' component(s)')}
def parse_hgts(input_f, method):
    """Extract the number of HGTs found.

    Parameters
    ----------
    input_f: string
        file descriptor for the tool's output results
    method: string
        HGT detection method (key into hgt_parse_strs)

    Returns
    -------
    number_of_hgts: string
        number of HGTs reported by a tool, or NaN if no entry was found
    """
    marker, prefix, suffix = hgt_parse_strs[method]
    for line in input_f:
        if marker not in line:
            continue
        # take the text after the prefix, up to the suffix
        tail = line.strip().split(prefix)[1]
        return tail.split(suffix)[0]
    return 'NaN'
def parse_consel(input_f):
    """Parse output of Consel version 0.20.

    Parameters
    ----------
    input_f: string
        file descriptor for Consel output results

    Returns
    -------
    pvalues: list
        list of AU P-values (as '%.2f' strings)
    """
    # discard the three header lines
    for _ in range(3):
        next(input_f)
    pvalues = []
    for raw in input_f:
        fields = raw.split()
        # skip the empty line at the bottom of the file
        if not fields:
            continue
        au = float(fields[4])
        if 0 <= au <= 1:
            pvalues.append("%.2f" % au)
    return pvalues
def parse_darkhorse(input_f, output_fp, low_lpi=0.0, high_lpi=0.6):
    """Parse output of DarkHorse (smry file).

    Parameters
    ----------
    input_f: string
        file descriptor for DarkHorse smry results
    output_fp: str
        filepath to output best hit genome IDs (skipped when falsy)
    low_lpi: float
        lower LPI (lineage probability index) score bound
    high_lpi: float
        upper LPI score bound

    Returns
    -------
    hgts: string
        one putative HGT-derived gene per line
        columns: query_id, besthit_id, tax_id, species, lineage, pct_id,
        pct_coverage, norm_LPI

    Notes
    -----
    Rows whose LPI falls strictly between the bounds are reported as
    putative HGTs; every best-hit genome ID is additionally collected
    and written to output_fp when one is given.
    """
    next(input_f)  # skip header
    best_hit_ids = set()
    hgts = []
    for raw in input_f:
        cols = raw.strip('\r\n').split('\t')
        best_hit_ids.add(cols[3])
        if low_lpi < float(cols[5]) < high_lpi:
            hgts.append('\t'.join((cols[0], cols[3], cols[12], cols[13],
                                   cols[14], cols[6], cols[9], cols[4])))
    if output_fp:
        with open(output_fp, 'w') as output_f:
            output_f.write('\n'.join(best_hit_ids))
    return '\n'.join(hgts)
def parse_hgtector(input_f):
    """Parse output of HGTector version 0.2.1.

    Parameters
    ----------
    input_f: string
        file descriptor for HGTector output results

    Returns
    -------
    output: string
        one putative HGT-derived gene per line
        columns: query_id, donor_taxid, donor_species, donor_lineage,
        pct_id, pct_coverage
    """
    matches = []
    for raw in input_f:
        cols = raw.strip('\r\n').split('\t')
        # keep only complete 15-column rows flagged as HGT (column 8 == '1')
        if len(cols) != 15 or cols[7] != '1':
            continue
        matches.append('\t'.join((cols[0], cols[12], cols[13], cols[14],
                                  cols[10], cols[11])))
    return '\n'.join(matches)
def parse_egid(input_f, genbank_fp):
    """Extract genes contained in GIs identified by EGID.

    Parameters
    ----------
    input_f: string
        file descriptor for EGID output results (GI coordinates)
    genbank_fp: string
        file path to genome in GenBank format (containing gene coordinates)

    Notes
    -----
    genbank_fp is the intermediate GenBank file generated by reformat_input.py,
    in which multiple sequences are concantenated, instead of the original
    GenBank file.

    Returns
    -------
    output: string
        gene names (protein_ids) separated by newline
    """
    # Map protein_id -> (start, end), 1-based inclusive coordinates.
    genes = {}
    gb = Sequence.read(genbank_fp, format='genbank')
    for feature in gb.interval_metadata.query(metadata={'type': 'CDS'}):
        m = feature.metadata
        if 'protein_id' in m:
            protein_id = m['protein_id'].replace('\"', '')
            if protein_id not in genes:
                # in scikit-bio, this number is the start location - 1
                start = feature.bounds[0][0] + 1
                end = feature.bounds[0][1]
                genes[protein_id] = (start, end)
    # fix: use a real set instead of a dict with dummy 1 values
    genes_in_gi = set()
    for line in input_f:
        x = line.strip().split()
        # a valid GI definition should have at least 2 columns
        if len(x) < 2:
            continue
        start = int(x[0])
        end = int(x[1])
        # collect every gene lying entirely within the island interval
        for gene, (gene_start, gene_end) in genes.items():
            if gene_start >= start and gene_end <= end:
                genes_in_gi.add(gene)
    return '\n'.join(sorted(genes_in_gi))
def parse_genemark(input_f, genbank_fp):
    """Extract atypical genes identified by GeneMark.

    Parameters
    ----------
    input_f: string
        file descriptor for GeneMark output gene list (*.lst)
    genbank_fp: string
        file path to genome in GenBank format

    Notes
    -----
    genbank_fp is the intermediate GenBank file generated by reformat_input.py,
    in which multiple sequences are concantenated, instead of the original
    GenBank file.

    Returns
    -------
    output: string
        gene names (protein_ids) separated by newline
    """
    # protein_id -> (start, end, strand) from the GenBank annotation
    genes = {}
    gb = Sequence.read(genbank_fp, format='genbank')
    for feature in gb.interval_metadata._intervals:
        meta = feature.metadata
        if meta['type'] == 'CDS' and 'protein_id' in meta:
            protein_id = meta['protein_id'].replace('\"', '')
            if protein_id not in genes:
                # scikit-bio stores start as (start location - 1)
                genes[protein_id] = (feature.bounds[0][0] + 1,
                                     feature.bounds[0][1],
                                     meta['strand'])
    atypical_genes = []
    reading = False
    for line in input_f:
        fields = line.strip().split()
        # the '#  Length' header marks the start of the gene table
        if fields == ['#', 'Length']:
            reading = True
        # atypical genes have class '2' in the 6th column
        elif reading and len(fields) == 6 and fields[5] == '2':
            coords = (int(fields[2].lstrip('<>')),
                      int(fields[3].lstrip('<>')),
                      fields[1])
            atypical_genes.extend(
                gene for gene, pos in genes.items() if pos == coords)
    return '\n'.join(sorted(atypical_genes))
def parse_output(hgt_results_fp,
                 method,
                 genbank_fp=None,
                 low_lpi=0.0,
                 high_lpi=0.6,
                 output_fp=None):
    """Dispatch to the parser matching the HGT detection method used.

    Parameters
    ----------
    hgt_results_fp: str
        filepath to detected HGTs
    genbank_fp: string
        file path to genome in GenBank format
    method: string
        tool used to detect HGTs
    output_fp: str
        output file storing best hit IDs (DarkHorse)
    low_lpi: float
        lower bound LPI score (DarkHorse Lineage Probability Index)
    high_lpi: float
        upper bound LPI score (DarkHorse Lineage Probability Index)

    Returns
    -------
    output: string
        number of HGTs detected

    Raises
    ------
    ValueError
        if the method has no parser (e.g. 'tree-puzzle')
    """
    with open(hgt_results_fp, 'r') as input_f:
        # simple count-style parsers share one implementation
        if method in ('ranger-dtl', 'trex', 'jane4', 'riata-hgt'):
            return parse_hgts(input_f=input_f, method=method)
        if method == 'consel':
            return parse_consel(input_f=input_f)
        if method == 'darkhorse':
            return parse_darkhorse(input_f=input_f,
                                   output_fp=output_fp,
                                   low_lpi=low_lpi,
                                   high_lpi=high_lpi)
        if method == 'hgtector':
            return parse_hgtector(input_f=input_f)
        if method == 'egid':
            return parse_egid(input_f=input_f, genbank_fp=genbank_fp)
        if method == 'genemark':
            return parse_genemark(input_f=input_f, genbank_fp=genbank_fp)
        raise ValueError("Method is not supported: %s" % method)
@click.command()
@click.option('--hgt-results-fp', required=True,
              type=click.Path(resolve_path=True, readable=True, exists=True,
                              file_okay=True),
              help='Output file containing HGT information')
@click.option('--genbank-fp', required=False,
              type=click.Path(resolve_path=True, readable=True, exists=True,
                              file_okay=True),
              help='Output file containing HGT information')
@click.option('--ncbi-nr', required=False,
              type=click.Path(resolve_path=True, readable=True, exists=True,
                              file_okay=True),
              help='NCBI nr database in FASTA format to link'
                   'taxon ids with accession numbers for DarkHorse output')
@click.option('--method', required=True,
              type=click.Choice(['trex', 'ranger-dtl',
                                 'riata-hgt', 'consel',
                                 'darkhorse', 'hgtector',
                                 'genemark', 'egid', 'jane4',
                                 'tree-puzzle']),
              help='The method used for HGT detection')
@click.option('--darkhorse-low-lpi', type=float, default=0.0,
              show_default=True, required=False, help='Lower bound LPI score')
@click.option('--darkhorse-high-lpi', type=float, default=0.6,
              show_default=True, required=False, help='Upper bound LPI score')
@click.option('--darkhorse-output-fp', required=False,
              type=click.Path(resolve_path=True, readable=True, exists=False,
                              file_okay=True),
              help='Output all best hit IDs from DarkHorse summary')
def main(hgt_results_fp,
         genbank_fp,
         method,
         ncbi_nr,
         darkhorse_low_lpi,
         darkhorse_high_lpi,
         darkhorse_output_fp=None):
    """ Parsing functions for various HGT detection tool outputs.
    """
    # NOTE(review): --ncbi-nr is accepted but ``ncbi_nr`` is never used in
    # this body; confirm whether it is consumed elsewhere or is dead.
    # Result is written to stdout without a trailing newline.
    output = parse_output(hgt_results_fp=hgt_results_fp,
                          method=method,
                          genbank_fp=genbank_fp,
                          low_lpi=darkhorse_low_lpi,
                          high_lpi=darkhorse_high_lpi,
                          output_fp=darkhorse_output_fp)
    sys.stdout.write(output)


if __name__ == "__main__":
    main()
| |
import unittest
from collections import deque
def bfs(source, neighbors):
    """Return all vertices reachable from ``source`` in breadth-first order.

    ``neighbors`` is a callable mapping a vertex to its adjacent vertices.
    """
    order = []
    seen = {source}
    frontier = deque((source,))
    while frontier:
        vertex = frontier.popleft()
        order.append(vertex)
        for nxt in neighbors(vertex):
            if nxt not in seen:
                seen.add(nxt)
                frontier.append(nxt)
    return order
def moralize(vertices, parents, adjacent, addedge):
    """Moralize a directed graph: "marry" every pair of co-parents.

    For each vertex, every pair of its parents that is not already
    adjacent gets connected via ``addedge``.
    """
    for child in vertices:
        ps = parents(child)
        for i, first in enumerate(ps):
            for second in ps[i + 1:]:
                if not adjacent(first, second):
                    addedge(first, second)
def triangulate(vertices, neighbors, addedge, reorder=True):
    """Add fill-in edges so the graph becomes chordal.

    Vertices are eliminated in order (optionally re-ordered by BFS from
    the first vertex); for each eliminated vertex, its not-yet-eliminated
    neighbors are pairwise connected unless already adjacent.
    """
    if reorder:
        vertices = bfs(vertices[0], neighbors)
    eliminated = set()
    for vertex in vertices:
        eliminated.add(vertex)
        nbrs = neighbors(vertex)
        for i, left in enumerate(nbrs):
            if left in eliminated:
                continue
            for right in nbrs[i + 1:]:
                # re-query adjacency each time: addedge mutates the graph
                if right not in eliminated and right not in neighbors(left):
                    addedge(left, right)
def cliquegraphvertices(vertices, neighbors, reorder=True):
    """Collect the cliques of a (triangulated) graph, greedily grown.

    For every vertex, a clique is grown by adding each neighbor that is
    adjacent to all current members; each clique is stored as a sorted
    tuple, so duplicates collapse in the returned set.
    """
    if reorder:
        vertices = bfs(vertices[0], neighbors)
    cliques = set()
    for vertex in vertices:
        members = [vertex]
        for candidate in neighbors(vertex):
            if all(candidate in neighbors(m) for m in members):
                members.append(candidate)
        cliques.add(tuple(sorted(members)))
    return cliques
def kruskal(vertices, edges, weight):
    """Minimum spanning tree/forest via Kruskal's algorithm.

    ``weight`` maps an edge to its cost; union-find uses simple
    pointer-chasing (no path compression) which is fine for small graphs.
    Ties are broken by the input order of ``edges`` (stable sort).
    """
    parent = {v: v for v in vertices}

    def root(u):
        while parent[u] != u:
            u = parent[u]
        return u

    mst = []
    for edge in sorted(edges, key=weight):
        root_a = root(edge[0])
        root_b = root(edge[1])
        if root_a != root_b:
            mst.append(edge)
            parent[root_b] = root_a
    return mst
def cliquegraph(vertices, neighbors, reorder=True):
    """Build the clique graph: one vertex per clique, edges between
    cliques that share at least one original vertex."""
    cliques = list(cliquegraphvertices(vertices, neighbors, reorder))
    graph = UndirectedGraph(cliques, [])
    for i, clique_a in enumerate(cliques):
        for clique_b in cliques[i + 1:]:
            if set(clique_a) & set(clique_b):
                graph.add_edge(clique_a, clique_b)
    return graph
class Graph:
    """Base class for graphs stored as vertex list ``v`` and edge list ``e``;
    provides networkx conversion and plotting helpers."""

    def to_networkx(self):
        """Convert to an undirected ``networkx.Graph`` (imported lazily)."""
        import networkx as nx
        graph = nx.Graph()
        for vertex in self.v:
            graph.add_node(vertex)
        for edge in self.e:
            graph.add_edge(edge[0], edge[1])
        return graph

    def show(self, pos=None):
        """Draw the graph with matplotlib; blocks until the window closes."""
        import networkx as nx
        import matplotlib.pyplot as plt
        nx.draw_networkx(self.to_networkx(), pos=pos, with_labels=True)
        plt.show()
class DirectedGraph(Graph):
    """Directed graph as vertex/edge lists plus a child -> parents map."""

    def __init__(self, v, e):
        self.v = list(v)
        self.e = list(e)
        # parents[x] lists the tail of every edge pointing at x
        self.parents = {vertex: [] for vertex in self.v}
        for tail, head in self.e:
            self.parents[head].append(tail)

    def adjacent(self, u, v):
        """True if an edge connects u and v in either direction."""
        return u in self.parents[v] or v in self.parents[u]

    def to_undirected(self):
        """Forget edge directions."""
        return UndirectedGraph(self.v, self.e)

    def add_edge(self, u, v):
        """Append the directed edge u -> v."""
        self.e.append((u, v))
        self.parents[v].append(u)

    def moralize(self):
        """Marry co-parents in place (see module-level ``moralize``)."""
        moralize(self.v,
                 lambda x: self.parents[x],
                 self.adjacent,
                 self.add_edge)
class UndirectedGraph(Graph):
    """Undirected graph with adjacency lists and junction-tree helpers."""

    def __init__(self, v, e):
        """v: iterable of vertices; e: iterable of 2-tuples (undirected)."""
        self.v = list(v)
        self.e = list(e)
        # adjacency list: vertex -> list of neighboring vertices
        self.edges = {}
        # fix: the original iterated the *parameters* with shadowing loop
        # variables (``for v in v`` / ``for e in e``); if v or e was an
        # iterator it had already been consumed by list() above, leaving
        # the graph empty.  Iterate the stored copies instead.
        for vertex in self.v:
            self.edges[vertex] = []
        for u, w in self.e:
            self.edges[u].append(w)
            self.edges[w].append(u)
        self.triangulated = False

    def has_edge(self, u, v):
        """True if u and v are adjacent."""
        return v in self.edges[u]

    def neighbors(self, v):
        """Adjacency list of v (the live list, not a copy)."""
        return self.edges[v]

    def add_edge(self, u, v):
        """Insert the undirected edge {u, v}."""
        self.edges[u].append(v)
        self.edges[v].append(u)
        self.e.append((u, v))

    def triangulate(self, reorder=False):
        """Chordalize in place; optionally BFS-reorder vertices first."""
        if reorder:
            self.v = bfs(self.v[0], self.neighbors)
        triangulate(self.v, self.neighbors, self.add_edge, False)
        self.triangulated = True

    def to_cliquegraph(self):
        """Clique graph of this graph; triangulates lazily if needed."""
        if not self.triangulated:
            self.triangulate()
        return cliquegraph(self.v, self.neighbors, False)

    def to_junctiontree(self):
        """Junction tree: maximum-separator spanning tree of the clique
        graph (weights are negated separator sizes so Kruskal's minimum
        tree maximizes clique overlap)."""
        cg = self.to_cliquegraph()
        weights = {}
        for edge in cg.e:
            weights[edge] = -len(set(edge[0]).intersection(set(edge[1])))
        mst = kruskal(cg.v, cg.e, lambda x: weights[x])
        return UndirectedGraph(cg.v, mst)
class Tests(unittest.TestCase):
    """Unit tests for the graph / junction-tree helpers."""

    def bn(self, n, d):
        # complete binary tree of depth d, vertices numbered from 1
        return [] if n >= 2 ** (d-1) else [n*2, n*2+1]

    def test_bfs(self):
        depth = 3
        self.assertEqual(list(range(1, 2 ** depth)),
                         bfs(1, lambda x: self.bn(x, depth)))

    def test_triangulate_1(self):
        # https://www.cs.cmu.edu/~epxing/Class/10708-05/Slides/ve2.pdf
        v = ['C', 'D', 'S', 'I', 'L', 'H', 'J', 'G']
        e = [('C', 'D'),
             ('D', 'I'),
             ('D', 'G'),
             ('I', 'G'),
             ('I', 'S'),
             ('G', 'H'),
             ('G', 'L'),
             ('G', 'J'),
             ('S', 'L'),
             ('S', 'J'),
             ('L', 'J'),
             ('H', 'J')]
        ug = UndirectedGraph(v, e)
        self.assertFalse(ug.has_edge('I', 'L'))
        self.assertFalse(ug.has_edge('I', 'J'))
        triangulate(ug.v, ug.neighbors, ug.add_edge, False)
        self.assertTrue(ug.has_edge('I', 'L'))
        self.assertTrue(ug.has_edge('I', 'J'))
        self.assertEqual(14, len(ug.e))

    def test_triangulate_2(self):
        # Fig 1 in http://ac.els-cdn.com/0022247X70902829/1-s2.0-0022247X70902829-main.pdf
        v = [1, 2, 3, 4, 5, 6]
        e = [(1, 2),
             (1, 3),
             (1, 4),
             (1, 6),
             (2, 3),
             (2, 4),
             (2, 5),
             (3, 5),
             (3, 6),
             (4, 5),
             (4, 6),
             (5, 6)]
        ug = UndirectedGraph(v, e)
        triangulate(ug.v, ug.neighbors, ug.add_edge, False)
        self.assertTrue(ug.has_edge(2, 6))  # for [2,4,6,3,2]
        self.assertTrue(ug.has_edge(3, 4))  # for [1,4,5,3,1]
        self.assertEqual(14, len(ug.e))

    def test_cliquegraphvertices(self):
        v = [1, 2, 3, 4, 5, 6]
        e = [(1, 2),
             (1, 3),
             (2, 3),
             (3, 4),
             (4, 5),
             (4, 6),
             (5, 6)]
        ug = UndirectedGraph(v, e)
        ug.triangulate()
        cliques = cliquegraphvertices(ug.v, ug.neighbors, False)
        self.assertTrue((4, 5, 6) in cliques)
        self.assertTrue((3, 4) in cliques)
        self.assertTrue((1, 2, 3) in cliques)
        # fix: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(3, len(cliques))

    def test_kruskal(self):
        v = ['a', 'b', 'c', 'd', 'e', 'f']
        e = {('a', 'b'): 5,
             ('a', 'c'): 6,
             ('a', 'd'): 4,
             ('b', 'c'): 1,
             ('b', 'd'): 2,
             ('c', 'd'): 2,
             ('c', 'e'): 5,
             ('c', 'f'): 3,
             ('d', 'f'): 4,
             ('e', 'f'): 4}
        # fix: feeding e.keys() made the tie-break between the weight-2
        # edges depend on dict iteration order; pass a sorted edge list so
        # the selected MST is deterministic ((b,d) wins over (c,d)).
        ans = [('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'f'), ('e', 'f')]
        self.assertEqual(ans,
                         sorted(kruskal(v, sorted(e.keys()), lambda x: e[x])))

    def test_junctiontree(self):
        v = ['a', 'b', 'c', 'd', 'e']
        e = [('a', 'b'),
             ('a', 'd'),
             ('b', 'c'),
             ('b', 'd'),
             ('c', 'd'),
             ('c', 'e'),
             ('d', 'e')]
        ug = UndirectedGraph(v, e)
        jt = ug.to_junctiontree()
        # fix: clique vertices come from a set, so their list order is not
        # guaranteed; compare vertices sorted and edges as unordered pairs.
        self.assertEqual(sorted(jt.v),
                         [('a', 'b', 'd'), ('b', 'c', 'd'), ('c', 'd', 'e')])
        self.assertEqual({frozenset(edge) for edge in jt.e},
                         {frozenset({('b', 'c', 'd'), ('a', 'b', 'd')}),
                          frozenset({('b', 'c', 'd'), ('c', 'd', 'e')})})
# if __name__ == '__main__':
# unittest.main()
| |
# https://github.com/ethereum/go-ethereum/wiki/Blockpool
import time
from ethereum.utils import sha3
import rlp
from rlp.utils import encode_hex
from ethereum import processblock
from synchronizer import Synchronizer
from ethereum.slogging import get_logger
from ethereum.chain import Chain
from ethereum.blocks import Block, VerificationFailed
from ethereum.transactions import Transaction
from devp2p.service import WiredService
import eth_protocol
import gevent
import gevent.lock
from gevent.queue import Queue
log = get_logger('eth.chainservice')
# patch to get context switches between tx replay
processblock_apply_transaction = processblock.apply_transaction


def apply_transaction(block, tx):
    # Wrapper around the original processblock.apply_transaction that
    # sleeps 1 ms per transaction so other greenlets get scheduled while
    # a block's transactions are being replayed.
    log.debug('apply_transaction ctx switch', at=time.time())
    gevent.sleep(0.001)
    return processblock_apply_transaction(block, tx)

# install the wrapper; the original stays reachable via
# processblock_apply_transaction bound above
processblock.apply_transaction = apply_transaction

# hex-encoded sha3 of the rlp encoding of ``data``
rlp_hash_hex = lambda data: encode_hex(sha3(rlp.encode(data)))
class DuplicatesFilter(object):
    """Bounded LRU-style filter remembering the last ``max_items`` items.

    known(data) answers "have we seen this recently?" and records/refreshes
    the item as a side effect.
    """

    def __init__(self, max_items=128):
        self.max_items = max_items
        self.filter = list()  # oldest item first

    def known(self, data):
        """Return True if data was seen recently; remember/refresh it."""
        if data not in self.filter:
            self.filter.append(data)
            if len(self.filter) > self.max_items:
                self.filter.pop(0)  # evict the least recently seen item
            return False
        else:
            # BUG FIX: the refresh used ``append(self.filter.pop(0))``,
            # which rotated the *oldest* entry to the back regardless of
            # which item was actually seen — so frequently-seen items were
            # evicted first.  Move ``data`` itself to the most-recent end.
            self.filter.remove(data)
            self.filter.append(data)
            return True
class ChainService(WiredService):
    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(network_id=0))

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None

    # bounds on the pending block/transaction queues (put() blocks when full)
    block_queue_size = 1024
    transaction_queue_size = 1024

    # running totals consumed by gpsec() for average gas throughput
    processed_gas = 0
    processed_elapsed = 0
    def __init__(self, app):
        """Wire the chain service into the app: open the Chain on the app's
        db service and set up queues, locks, filters and callback lists."""
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        self.chain = Chain(self.db, new_head_cb=self._on_new_head, coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        # plain flag guarding the single _add_blocks worker greenlet
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        # suppresses re-broadcasting of recently seen blocks/transactions
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
@property
def is_syncing(self):
return self.synchronizer.synctask is not None
def _on_new_head(self, block):
for cb in self.on_new_head_cbs:
cb(block)
self._on_new_head_candidate() # we implicitly have a new head_candidate
def _on_new_head_candidate(self):
for cb in self.on_new_head_candidate_cbs:
cb(self.chain.head_candidate)
def add_transaction(self, tx, origin=None):
assert isinstance(tx, Transaction)
log.debug('add_transaction', locked=self.add_transaction_lock.locked())
self.add_transaction_lock.acquire()
success = self.chain.add_transaction(tx)
self.add_transaction_lock.release()
if success:
self._on_new_head_candidate()
self.broadcast_transaction(tx, origin=origin) # asap
def add_block(self, t_block, proto):
"adds a block to the block_queue and spawns _add_block if not running"
self.block_queue.put((t_block, proto)) # blocks if full
if not self.add_blocks_lock:
self.add_blocks_lock = True # need to lock here (ctx switch is later)
gevent.spawn(self._add_blocks)
def add_mined_block(self, block):
log.debug('adding mined block', block=block)
assert block.check_pow()
if self.chain.add_block(block):
log.info('added', block=block, ts=time.time())
assert block == self.chain.head
self.broadcast_newblock(block, chain_difficulty=block.chain_difficulty())
def knows_block(self, block_hash):
"if block is in chain or in queue"
if block_hash in self.chain:
return True
# check if queued or processed
for i in range(len(self.block_queue.queue)):
if block_hash == self.block_queue.queue[i][0].header.hash:
return True
return False
def _add_blocks(self):
    """Drain the block queue: validate, deserialize and import each block.

    Runs in its own greenlet (spawned by ``add_block``); ``add_blocks_lock``
    guarantees at most one instance runs at a time, and the transaction
    semaphore keeps tx insertion out while blocks are being imported.
    """
    log.debug('add_blocks', qsize=self.block_queue.qsize(),
              add_tx_lock=self.add_transaction_lock.locked())
    assert self.add_blocks_lock is True
    self.add_transaction_lock.acquire()
    try:
        while not self.block_queue.empty():
            # peek (not get) so knows_block() still sees the entry while
            # this block is being processed
            t_block, proto = self.block_queue.peek()  # peek: knows_block while processing
            if t_block.header.hash in self.chain:
                log.warn('known block', block=t_block)
                self.block_queue.get()
                continue
            if t_block.header.prevhash not in self.chain:
                log.warn('missing parent', block=t_block)
                self.block_queue.get()
                continue
            # FIXME, this is also done in validation and in synchronizer for new_blocks
            if not t_block.header.check_pow():
                log.warn('invalid pow', block=t_block, FIXME='ban node')
                self.block_queue.get()
                continue
            try:  # deserialize
                st = time.time()
                block = t_block.to_block(db=self.chain.db)
                elapsed = time.time() - st
                log.debug('deserialized', elapsed='%.4fs' % elapsed,
                          gas_used=block.gas_used, gpsec=self.gpsec(block.gas_used, elapsed))
            except processblock.InvalidTransaction as e:
                log.warn('invalid transaction', block=t_block, error=e, FIXME='ban node')
                self.block_queue.get()
                continue
            except VerificationFailed as e:
                log.warn('verification failed', error=e, FIXME='ban node')
                self.block_queue.get()
                continue
            if self.chain.add_block(block):
                log.info('added', block=block, ts=time.time())
            # dequeue unconditionally, whether or not the import succeeded
            self.block_queue.get()  # remove block from queue (we peeked only)
            # yield to other greenlets between blocks
            gevent.sleep(0.001)
    finally:
        self.add_blocks_lock = False
        self.add_transaction_lock.release()
def gpsec(self, gas_spent=0, elapsed=0):
    """Accumulate gas/time totals and return the average gas-per-second rate."""
    self.processed_gas += gas_spent
    self.processed_elapsed += elapsed
    # +0.001 guards against division by zero when nothing has elapsed yet
    rate = self.processed_gas / (0.001 + self.processed_elapsed)
    return int(rate)
def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
    """Announce *block* to all peers (except *origin*'s peer), at most once."""
    if not chain_difficulty:
        # difficulty can only be computed for blocks we have imported
        assert block.hash in self.chain
        chain_difficulty = block.chain_difficulty()
    assert isinstance(block, (eth_protocol.TransientBlock, Block))
    # broadcast_filter de-duplicates announcements by header hash
    if self.broadcast_filter.known(block.header.hash):
        log.debug('already broadcasted block')
    else:
        log.debug('broadcasting newblock', origin=origin)
        bcast = self.app.services.peermanager.broadcast
        bcast(eth_protocol.ETHProtocol, 'newblock', args=(block, chain_difficulty),
              exclude_peers=[origin.peer] if origin else [])
def broadcast_transaction(self, tx, origin=None):
    """Relay *tx* to all peers except the one it came from, at most once."""
    assert isinstance(tx, Transaction)
    if not self.broadcast_filter.known(tx.hash):
        log.debug('broadcasting tx', origin=origin)
        exclude = [origin.peer] if origin else []
        broadcast = self.app.services.peermanager.broadcast
        broadcast(eth_protocol.ETHProtocol, 'transactions', args=(tx,),
                  exclude_peers=exclude)
    else:
        log.debug('already broadcasted tx')
# wire protocol receivers ###########
def on_wire_protocol_start(self, proto):
    """Wire up a freshly connected peer: register receivers, send our status."""
    log.debug('on_wire_protocol_start', proto=proto)
    assert isinstance(proto, self.wire_protocol)
    # register callbacks
    proto.receive_status_callbacks.append(self.on_receive_status)
    proto.receive_transactions_callbacks.append(self.on_receive_transactions)
    proto.receive_getblockhashes_callbacks.append(self.on_receive_getblockhashes)
    proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
    proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
    proto.receive_blocks_callbacks.append(self.on_receive_blocks)
    proto.receive_newblock_callbacks.append(self.on_receive_newblock)
    # send status
    head = self.chain.head
    proto.send_status(chain_difficulty=head.chain_difficulty(), chain_head_hash=head.hash,
                      genesis_hash=self.chain.genesis.hash)
def on_wire_protocol_stop(self, proto):
    """Called when a peer's wire protocol shuts down; currently log-only."""
    assert isinstance(proto, self.wire_protocol)
    log.debug('on_wire_protocol_stop', proto=proto)
def on_receive_status(self, proto, eth_version, network_id, chain_difficulty, chain_head_hash,
                      genesis_hash):
    """Validate a peer's status handshake, then start syncing and share txs.

    Raises ETHProtocolError (disconnecting the peer) on network-id or
    genesis mismatch.
    """
    log.debug('status received', proto=proto, eth_version=eth_version)
    assert eth_version == proto.version, (eth_version, proto.version)
    if network_id != self.config['eth'].get('network_id', proto.network_id):
        log.warn("invalid network id", remote_network_id=network_id,
                 expected_network_id=self.config['eth'].get('network_id', proto.network_id))
        raise eth_protocol.ETHProtocolError('wrong network_id')
    # check genesis
    # NOTE: str.encode('hex') is Python 2 only — this module targets py2
    if genesis_hash != self.chain.genesis.hash:
        log.warn("invalid genesis hash", remote_id=proto, genesis=genesis_hash.encode('hex'))
        raise eth_protocol.ETHProtocolError('wrong genesis block')
    # request chain
    self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)
    # send transactions
    transactions = self.chain.get_transactions()
    if transactions:
        log.debug("sending transactions", remote_id=proto)
        proto.send_transactions(*transactions)
# transactions
def on_receive_transactions(self, proto, transactions):
    "receives rlp.decoded serialized"
    log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
    # funnel each remote tx through the normal add path; origin prevents echo
    add = self.add_transaction
    for tx in transactions:
        add(tx, origin=proto)
# blockhashes ###########
def on_receive_getblockhashes(self, proto, child_block_hash, count):
    """Answer a peer's request for up to *count* ancestor hashes of a block.

    Walks parent links backwards starting from *child_block_hash* and sends
    the collected hashes (possibly empty) via ``send_blockhashes``.
    """
    log.debug("handle_get_blockhashes", count=count, block_hash=encode_hex(child_block_hash))
    # cap the reply size at the protocol-defined maximum
    max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
    found = []
    if child_block_hash not in self.chain:
        log.debug("unknown block")
        proto.send_blockhashes(*[])
        return
    last = child_block_hash
    while len(found) < max_hashes:
        try:
            # lazily decode only the header's first field (prevhash)
            # instead of deserializing the whole block
            last = rlp.decode_lazy(self.chain.db.get(last))[0][0]
        except KeyError:
            # this can happen if we started a chain download, which did not complete
            # should not happen if the hash is part of the canonical chain
            log.warn('KeyError in getblockhashes', hash=last)
            break
        if last:
            found.append(last)
        else:
            break
    log.debug("sending: found block_hashes", count=len(found))
    proto.send_blockhashes(*found)
def on_receive_blockhashes(self, proto, blockhashes):
    """Forward a peer's block-hash reply to the synchronizer."""
    if not blockhashes:
        log.debug("recv 0 remote block hashes, signifying genesis block")
    else:
        log.debug("on_receive_blockhashes", count=len(blockhashes), remote_id=proto,
                  first=encode_hex(blockhashes[0]), last=encode_hex(blockhashes[-1]))
    self.synchronizer.receive_blockhashes(proto, blockhashes)
# blocks ################
def on_receive_getblocks(self, proto, blockhashes):
    """Serve raw block data for the hashes a peer requested (best effort)."""
    log.debug("on_receive_getblocks", count=len(blockhashes))
    found = []
    # honor at most max_getblocks_count requests; skip hashes we don't have
    for requested_hash in blockhashes[:self.wire_protocol.max_getblocks_count]:
        try:
            found.append(self.chain.db.get(requested_hash))
        except KeyError:
            log.debug("unknown block requested", block_hash=encode_hex(requested_hash))
    if found:
        log.debug("found", count=len(found))
        proto.send_blocks(*found)
def on_receive_blocks(self, proto, transient_blocks):
    """Hand a batch of received (still serialized) blocks to the synchronizer."""
    if transient_blocks:
        blk_number = max(t.header.number for t in transient_blocks)
    else:
        blk_number = 0
    log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
              highest_number=blk_number)
    if transient_blocks:
        self.synchronizer.receive_blocks(proto, transient_blocks)
def on_receive_newblock(self, proto, block, chain_difficulty):
    """Forward a peer's newblock announcement to the synchronizer."""
    log.debug("recv newblock", block=block, remote_id=proto)
    self.synchronizer.receive_newblock(proto, block, chain_difficulty)
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the data_displays tables.

    Auto-generated; the ``models`` dict below is a frozen snapshot of the
    ORM at the time this migration was written — do not edit it by hand.
    """

    def forwards(self, orm):
        """Create the data_displays models and their M2M join tables."""
        # Adding model 'DataVisualizationResource'
        db.create_table('data_displays_datavisualizationresource', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('source_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, blank=True)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
        ))
        db.send_create_signal('data_displays', ['DataVisualizationResource'])
        # Adding model 'DataVisualization'
        db.create_table('data_displays_datavisualization', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('data_displays', ['DataVisualization'])
        # Adding model 'DataVisualizationPart'
        db.create_table('data_displays_datavisualizationpart', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('visualization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data_displays.DataVisualization'], blank=True)),
            ('resource', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data_displays.DataVisualizationResource'], blank=True)),
            ('load_order', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
        ))
        db.send_create_signal('data_displays', ['DataVisualizationPart'])
        # Adding model 'DataDisplayTemplate'
        db.create_table('data_displays_datadisplaytemplate', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
            ('subtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
            ('subsubtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('data_displays', ['DataDisplayTemplate'])
        # Adding M2M table for field visualizations on 'DataDisplayTemplate'
        db.create_table('data_displays_datadisplaytemplate_visualizations', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('datadisplaytemplate', models.ForeignKey(orm['data_displays.datadisplaytemplate'], null=False)),
            ('datavisualization', models.ForeignKey(orm['data_displays.datavisualization'], null=False))
        ))
        db.create_unique('data_displays_datadisplaytemplate_visualizations', ['datadisplaytemplate_id', 'datavisualization_id'])
        # Adding M2M table for field levels on 'DataDisplayTemplate'
        db.create_table('data_displays_datadisplaytemplate_levels', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('datadisplaytemplate', models.ForeignKey(orm['data_displays.datadisplaytemplate'], null=False)),
            ('geolevel', models.ForeignKey(orm['profiles.geolevel'], null=False))
        ))
        db.create_unique('data_displays_datadisplaytemplate_levels', ['datadisplaytemplate_id', 'geolevel_id'])
        # Adding M2M table for field records on 'DataDisplayTemplate'
        db.create_table('data_displays_datadisplaytemplate_records', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('datadisplaytemplate', models.ForeignKey(orm['data_displays.datadisplaytemplate'], null=False)),
            ('georecord', models.ForeignKey(orm['profiles.georecord'], null=False))
        ))
        db.create_unique('data_displays_datadisplaytemplate_records', ['datadisplaytemplate_id', 'georecord_id'])
        # Adding M2M table for field domains on 'DataDisplayTemplate'
        db.create_table('data_displays_datadisplaytemplate_domains', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('datadisplaytemplate', models.ForeignKey(orm['data_displays.datadisplaytemplate'], null=False)),
            ('datadomain', models.ForeignKey(orm['profiles.datadomain'], null=False))
        ))
        db.create_unique('data_displays_datadisplaytemplate_domains', ['datadisplaytemplate_id', 'datadomain_id'])
        # Adding M2M table for field indicators on 'DataDisplayTemplate'
        db.create_table('data_displays_datadisplaytemplate_indicators', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('datadisplaytemplate', models.ForeignKey(orm['data_displays.datadisplaytemplate'], null=False)),
            ('indicator', models.ForeignKey(orm['profiles.indicator'], null=False))
        ))
        db.create_unique('data_displays_datadisplaytemplate_indicators', ['datadisplaytemplate_id', 'indicator_id'])
        # Adding model 'DataDisplay'
        db.create_table('data_displays_datadisplay', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
            ('subtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
            ('subsubtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
            ('record', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.GeoRecord'], null=True, blank=True)),
            ('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'], null=True, blank=True)),
            ('time', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Time'], null=True, blank=True)),
            ('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100, blank=True)),
            ('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data_displays.DataDisplayTemplate'])),
            ('html', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, unique=True, max_length=100, blank=True)),
        ))
        db.send_create_signal('data_displays', ['DataDisplay'])

    def backwards(self, orm):
        """Drop everything created by forwards(), in reverse-safe order."""
        # Deleting model 'DataVisualizationResource'
        db.delete_table('data_displays_datavisualizationresource')
        # Deleting model 'DataVisualization'
        db.delete_table('data_displays_datavisualization')
        # Deleting model 'DataVisualizationPart'
        db.delete_table('data_displays_datavisualizationpart')
        # Deleting model 'DataDisplayTemplate'
        db.delete_table('data_displays_datadisplaytemplate')
        # Removing M2M table for field visualizations on 'DataDisplayTemplate'
        db.delete_table('data_displays_datadisplaytemplate_visualizations')
        # Removing M2M table for field levels on 'DataDisplayTemplate'
        db.delete_table('data_displays_datadisplaytemplate_levels')
        # Removing M2M table for field records on 'DataDisplayTemplate'
        db.delete_table('data_displays_datadisplaytemplate_records')
        # Removing M2M table for field domains on 'DataDisplayTemplate'
        db.delete_table('data_displays_datadisplaytemplate_domains')
        # Removing M2M table for field indicators on 'DataDisplayTemplate'
        db.delete_table('data_displays_datadisplaytemplate_indicators')
        # Deleting model 'DataDisplay'
        db.delete_table('data_displays_datadisplay')

    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'data_displays.datadisplay': {
            'Meta': {'object_name': 'DataDisplay'},
            'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
            'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '100', 'blank': 'True'}),
            'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_displays.DataDisplayTemplate']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        'data_displays.datadisplaytemplate': {
            'Meta': {'object_name': 'DataDisplayTemplate'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'symmetrical': 'False', 'blank': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False', 'blank': 'True'}),
            'records': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoRecord']", 'symmetrical': 'False', 'blank': 'True'}),
            'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'visualizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['data_displays.DataVisualization']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'data_displays.datavisualization': {
            'Meta': {'object_name': 'DataVisualization'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'data_displays.datavisualizationpart': {
            'Meta': {'object_name': 'DataVisualizationPart'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'load_order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_displays.DataVisualizationResource']", 'blank': 'True'}),
            'visualization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_displays.DataVisualization']", 'blank': 'True'})
        },
        'data_displays.datavisualizationresource': {
            'Meta': {'object_name': 'DataVisualizationResource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
        },
        'profiles.datadomain': {
            'Meta': {'object_name': 'DataDomain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'profiles.datasource': {
            'Meta': {'object_name': 'DataSource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'profiles.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
        },
        'profiles.georecord': {
            'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
            'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
            'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
        },
        'profiles.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '10'}),
            'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
        },
        'profiles.indicatordomain': {
            'Meta': {'object_name': 'IndicatorDomain'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
        },
        'profiles.time': {
            'Meta': {'object_name': 'Time'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
        }
    }

    complete_apps = ['data_displays']
| |
#!/usr/bin/env python
""" A unittest script for the Study module. """
import unittest
import json
from cutlass import Study
from cutlass import MIXS, MixsException
from CutlassTestConfig import CutlassTestConfig
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class StudyTest(unittest.TestCase):
""" A unit test class for the Study module. """
session = None
util = None
@classmethod
def setUpClass(cls):
""" Setup for the unittest. """
# Establish the session for each test method
cls.session = CutlassTestConfig.get_session()
cls.util = CutlassTestUtil()
def testImport(self):
""" Test the import of the Study module. """
success = False
try:
from cutlass import Study
success = True
except Exception:
pass
self.failUnless(success)
self.failIf(Study is None)
def testSessionCreate(self):
""" Test the creation of a Study via the session. """
success = False
study = None
try:
study = self.session.create_study()
success = True
except Exception:
pass
self.failUnless(success)
self.failIf(study is None)
def testName(self):
""" Test the name property. """
study = self.session.create_study()
self.util.stringTypeTest(self, study, "name")
self.util.stringPropertyTest(self, study, "name")
def testDescription(self):
""" Test the description property. """
study = self.session.create_study()
self.util.stringTypeTest(self, study, "description")
self.util.stringPropertyTest(self, study, "description")
def testIllegalSubtype(self):
""" Test the subtype property with an illegal value. """
study = self.session.create_study()
with self.assertRaises(Exception):
study.subtype = "random"
def testLegalSubtype(self):
""" Test the subtype property with a legal value. """
study = self.session.create_study()
success = False
subtype = "prediabetes"
try:
study.subtype = subtype
success = True
except Exception:
pass
self.assertTrue(success, "Able to use the subtype setter")
self.assertEqual(study.subtype, subtype,
"Property getter for 'subtype' works.")
def testIllegalCenter(self):
""" Test the center property with an illegal value. """
study = self.session.create_study()
with self.assertRaises(Exception):
study.center = "random"
def testLegalCenter(self):
""" Test the center property with a legal value. """
study = self.session.create_study()
success = False
center = "Broad Institute"
try:
study.center = center
success = True
except Exception:
pass
self.assertTrue(success, "Able to use the center setter")
self.assertEqual(study.center, center,
"Property getter for 'center' works.")
def testSRPID(self):
""" Test the srp_id property. """
study = self.session.create_study()
self.util.stringTypeTest(self, study, "srp_id")
self.util.stringPropertyTest(self, study, "srp_id")
def testContact(self):
""" Test the contact property. """
study = self.session.create_study()
self.util.stringTypeTest(self, study, "contact")
self.util.stringPropertyTest(self, study, "contact")
def testToJson(self):
""" Test the to_json() method. """
study = self.session.create_study()
success = False
name = "Tested name"
study.name = name
study_json = None
try:
study_json = study.to_json()
success = True
except Exception:
pass
self.assertTrue(success, "Able to use 'to_json'.")
self.assertTrue(study_json is not None, "to_json() returned data.")
parse_success = False
try:
study_data = json.loads(study_json)
parse_success = True
except Exception:
pass
self.assertTrue(parse_success, "to_json() did not throw an exception.")
self.assertTrue(study_data is not None,
"to_json() returned parsable JSON.")
self.assertTrue('meta' in study_data, "JSON has 'meta' key in it.")
self.assertEqual(study_data['meta']['name'],
name, "'name' in JSON had expected value.")
def testId(self):
""" Test the id property. """
study = self.session.create_study()
self.assertTrue(study.id is None,
"New template study has no ID.")
with self.assertRaises(AttributeError):
study.id = "test"
def testVersion(self):
""" Test the version property. """
study = self.session.create_study()
self.assertTrue(study.version is None,
"New template study has no version.")
with self.assertRaises(ValueError):
study.version = "test"
def testTags(self):
""" Test the tags property. """
study = self.session.create_study()
tags = study.tags
self.assertTrue(type(tags) == list, "Study tags() method returns a list.")
self.assertEqual(len(tags), 0, "Template study tags list is empty.")
new_tags = ["tagA", "tagB"]
study.tags = new_tags
self.assertEqual(study.tags, new_tags, "Can set tags on a study.")
json_str = study.to_json()
doc = json.loads(json_str)
self.assertTrue('tags' in doc['meta'],
"JSON representation has 'tags' field in 'meta'.")
self.assertEqual(doc['meta']['tags'], new_tags,
"JSON representation had correct tags after setter.")
def testAddTag(self):
""" Test the add_tag() method. """
study = self.session.create_study()
study.add_tag("test")
self.assertEqual(study.tags, ["test"], "Can add a tag to a study.")
json_str = study.to_json()
doc = json.loads(json_str)
self.assertEqual(doc['meta']['tags'], ["test"],
"JSON representation had correct tags after add_tag().")
# Try adding the same tag yet again, shouldn't get a duplicate
with self.assertRaises(ValueError):
study.add_tag("test")
json_str = study.to_json()
doc2 = json.loads(json_str)
self.assertEqual(doc2['meta']['tags'], ["test"],
"JSON document did not end up with duplicate tags.")
def testRequiredFields(self):
""" Test the required_fields() static method. """
required = Study.required_fields()
self.assertEqual(type(required), tuple,
"required_fields() returns a tuple.")
self.assertTrue(len(required) > 0,
"required_field() did not return empty value.")
def testLoadSaveDeleteStudy(self):
    """ Extensive test for the load, edit, save and delete functions. """
    # Attempt to save the study at all points before and after
    # adding the required fields
    study = self.session.create_study()
    test_name = "Test name"
    test_description = "Test description"
    test_contact = "Test contacts"
    test_links = {"part_of": []}
    test_center = "Jackson Laboratory"
    test_tag = "test"
    test_subtype = "prediabetes"
    # No fields set yet: save() must fail.
    self.assertFalse(study.save(),
                     "Study not saved successfully, no required fields")
    study.name = test_name
    study.description = test_description
    # Still missing required fields: save() must keep failing.
    self.assertFalse(study.save(), "Study not saved successfully")
    study.contact = test_contact
    study.subtype = test_subtype
    study.links = test_links
    self.assertFalse(study.save(), "Study not saved successfully")
    study.center = test_center
    study.add_tag(test_tag)
    # Make sure study does not delete if it does not exist
    with self.assertRaises(Exception):
        study.delete()
    # All required fields present now: save() should succeed.
    self.assertTrue(study.save() is True,
                    "Study was not saved successfully")
    # Load the study that was just saved from the OSDF instance
    study_loaded = self.session.create_study()
    study_loaded = study_loaded.load(study.id)
    # Check all fields were saved and loaded successfully
    self.assertEqual(study.name, study_loaded.name,
                     "Study name not saved & loaded successfully")
    self.assertEqual(study.tags[0], study_loaded.tags[0],
                     "Study tags not saved & loaded successfully")
    self.assertEqual(study.center, study_loaded.center,
                     "Study MIXS not saved & loaded successfully")
    # Study is deleted successfully
    self.assertTrue(study.delete(), "Study was not deleted successfully")
    # The study of the initial ID should not load successfully
    load_test = self.session.create_study()
    with self.assertRaises(Exception):
        load_test = load_test.load(study.id)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import datetime
import time
from decimal import Decimal
from .apitask import APITask
import json
from thing.models import Alliance, Character, CharacterApiScope, Contract, ContractItem, ContractSeeding, Corporation, Event, Item, Station, APIKey, UserProfile
from django.db.models import Q
from multiprocessing import Pool, Value, Array
class EsiContracts(APITask):
    """Periodic task that imports character and corporation contracts,
    plus their item lists, from EVE's ESI HTTP API into the local database."""

    name = 'thing.esi_contracts'

    # ESI endpoint templates; %d is the character/corporation id and, for the
    # item endpoints, the contract id.  %s is the page number.
    corp_contract_url = 'https://esi.evetech.net/latest/corporations/%d/contracts/?datasource=tranquility&page=%s'
    corp_contract_item_url = 'https://esi.evetech.net/latest/corporations/%d/contracts/%d/items/?datasource=tranquility'
    char_contract_url = 'https://esi.evetech.net/latest/characters/%d/contracts/?datasource=tranquility&page=%s'
    char_contract_item_url = 'https://esi.evetech.net/latest/characters/%d/contracts/%d/items/?datasource=tranquility'

    def run(self):
        """Import personal contracts for every character holding the
        character-contracts scope, then corporation contracts once per
        corporation via the first usable Contract_Manager character."""
        self.init()
        char_contract_scopes = CharacterApiScope.objects.filter(
            scope__in=['esi-contracts.read_character_contracts.v1']
        )
        for scope in char_contract_scopes:
            char = scope.character
            success = self.import_contracts(char, False)
        contract_scopes = CharacterApiScope.objects.filter(
            scope__in=['esi-contracts.read_corporation_contracts.v1']
        )
        # Hit each corporation at most once per run.
        seen_corps = set()
        for scope in contract_scopes:
            char = scope.character
            if 'corporation' in scope.scope:
                if 'Contract_Manager' in char.get_apiroles():
                    if char.corporation_id not in seen_corps\
                            and char.corporation_id is not None:
                        # A failed import leaves the corp unmarked so another
                        # character with the role can retry it.
                        try:
                            success = self.import_contracts(char, True)
                        except:
                            success = False
                        if success:
                            seen_corps.add(char.corporation_id)

    def import_contracts(self, character, for_corp):
        """Fetch all contract pages for one character (or its corporation when
        `for_corp` is True), upsert Contract rows, emit Events for status
        changes, then fetch item lists for contracts not yet populated.

        Returns True on success, False on any fetch/save failure.
        """
        char_id = character.id
        corp_id = character.corporation_id
        # A corp import is meaningless without a corporation id.
        if corp_id is None and for_corp:
            return False
        now = datetime.datetime.now()
        # Existing contracts visible to this character/corp, used both for
        # upserting and for the later "fetch items" pass.
        if for_corp:
            c_filter = Contract.objects.filter(Q(issuer_corp_id=corp_id) | Q(assignee_id=corp_id) | Q(acceptor_id=corp_id) | Q(assignee_id=character.corporation.alliance_id))
        else:
            c_filter = Contract.objects.filter(Q(issuer_char_id=char_id) | Q(assignee_id=char_id) | Q(acceptor_id=char_id))
        contracts = []
        page = 1
        ttl_pages = None
        if for_corp:
            import_url = self.corp_contract_url
        else:
            import_url = self.char_contract_url
        # Fetch page 1 first to learn the total page count from the
        # 'x-pages' response header.
        success, data, headers = self.fetch_esi_url(import_url % (corp_id if for_corp else char_id, page), character, headers_to_return=['x-pages'])
        if not success:
            print('Import failed for %s: %s' % (character.name, data))
            return False
        if 'x-pages' in headers:
            ttl_pages = int(headers['x-pages'])
        else:
            ttl_pages = 1
        if ttl_pages > 1:
            urls = [import_url % (corp_id if for_corp else char_id, i) for i in range(2, ttl_pages+1)]
            all_contract_data = self.fetch_batch_esi_urls(urls, character, batch_size=20)
        else:
            all_contract_data = dict()
            all_contract_data[''] = (success, data)
        for url, contract_data in all_contract_data.items():
            success, data = contract_data
            if not success:
                # Failed to retrieve contract information, back out
                print('Import failed: %s' % data)
                return False
            r_contracts = json.loads(data)
            # NOTE(review): this tests the accumulator list `contracts`, not the
            # freshly decoded `r_contracts`; for a list of dict rows the
            # membership test is always False, so the unwrap never fires.
            # Looks like it was meant to read `'response' in r_contracts` — confirm.
            if 'response' in contracts:
                r_contracts = contracts['response']
            # NOTE(review): an empty page aborts processing of any remaining
            # already-fetched pages in this dict — confirm that is intended.
            if len(r_contracts) == 0:
                break
            contracts.extend(r_contracts)
        # First we need to get all of the acceptor and assignee IDs
        contract_ids = set()
        station_ids = set()
        lookup_ids = set()
        lookup_corp_ids = set()
        contract_rows = []
        for row in contracts:
            contract_ids.add(int(row['contract_id']))
            if 'start_location_id' in row:
                station_ids.add(int(row['start_location_id']))
            if 'end_location_id' in row:
                station_ids.add(int(row['end_location_id']))
            lookup_ids.add(int(row['issuer_id']))
            lookup_corp_ids.add(int(row['issuer_corporation_id']))
            # NOTE(review): compares against the string '0' while the ids are
            # handled as ints below; if ESI returns numeric ids this check is
            # always true — confirm against the payload format.
            if row['assignee_id'] != '0':
                lookup_ids.add(int(row['assignee_id']))
            if row['acceptor_id'] != '0':
                lookup_ids.add(int(row['acceptor_id']))
            contract_rows.append(row)
        # Fetch bulk data
        char_map = Character.objects.in_bulk(lookup_ids)
        corp_map = Corporation.objects.in_bulk(lookup_ids | lookup_corp_ids)
        alliance_map = Alliance.objects.in_bulk(lookup_ids)
        station_map = Station.objects.in_bulk(station_ids)
        # Add missing IDs as *UNKNOWN* Characters for now
        new = []
        for new_id in lookup_ids.difference(char_map, corp_map, alliance_map, lookup_corp_ids):
            if new_id in char_map:
                continue
            char = Character(
                id=new_id,
                name="*UNKNOWN*",
            )
            new.append(char)
            char_map[new_id] = char
        if new:
            Character.objects.bulk_create(new)
        # Add missing Corporations too
        new = []
        for new_id in lookup_corp_ids.difference(corp_map):
            if new_id in corp_map:
                continue
            corp = Corporation(
                id=new_id,
                name="*UNKNOWN*",
            )
            new.append(corp)
            corp_map[new_id] = corp
        if new:
            Corporation.objects.bulk_create(new)
        # Fetch station data
        new = []
        for new_id in station_ids.difference(station_map):
            if new_id in station_map:
                continue
            station = Station(
                id=new_id,
                name="[Unknown Station: %d]" % new_id,
                short_name="[Unknown Station: %d]" % new_id,
                is_unknown=True,
            )
            new.append(station)
            station_map[new_id] = station
        if new:
            Station.objects.bulk_create(new)
        # Fetch all existing contracts
        c_map = {}
        for contract in c_filter.filter(contract_id__in=contract_ids):
            c_map[contract.contract_id] = contract
        # Finally, after all of that other bullshit, we can actually deal with
        # our goddamn contract rows
        new_contracts = []
        new_events = []
        for row in contract_rows:
            contract_id = int(row['contract_id'])
            # Rows referencing entities we could not resolve are skipped with
            # a warning rather than aborting the whole import.
            issuer_char = char_map.get(int(row['issuer_id']))
            if issuer_char is None:
                self.log_warn('Invalid issuer_id %s', row['issuer_id'])
                continue
            issuer_corp = corp_map.get(int(row['issuer_corporation_id']))
            if issuer_corp is None:
                self.log_warn('Invalid issuer_corporation_id %s', row['issuer_corporation_id'])
                continue
            start_station = station_map.get(int(row['start_location_id']))
            if start_station is None:
                self.log_warn('Invalid start_location_id %s', row['start_location_id'])
                continue
            end_station = station_map.get(int(row['end_location_id']))
            if end_station is None:
                self.log_warn('Invalid end_location_id %s', row['end_location_id'])
                continue
            assignee_id = int(row['assignee_id'])
            acceptor_id = int(row['acceptor_id'])
            dateIssued = self.parse_api_date(row['date_issued'], True)
            dateExpired = self.parse_api_date(row['date_expired'], True)
            if 'date_accepted' in row:
                dateAccepted = self.parse_api_date(row['date_accepted'], True)
            else:
                dateAccepted = None
            if 'date_completed' in row:
                dateCompleted = self.parse_api_date(row['date_completed'], True)
            else:
                dateCompleted = None
            type = row['type']
            '''
            Contract Types:
                "unknown",
                "item_exchange",
                "auction",
                "courier",
                "loan"
            Contract Statuses:
                "outstanding",
                "in_progress",
                "finished_issuer",
                "finished_contractor",
                "finished",
                "cancelled",
                "rejected",
                "failed",
                "deleted",
                "reversed"
            Availability:
                "public",
                "personal",
                "corporation",
                "alliance"
            '''
            contract = c_map.get(contract_id, None)
            # Contract exists, maybe update stuff
            if contract is not None:
                if contract.status != row['status']:
                    text = "Contract %s changed status from '%s' to '%s'" % (
                        contract, contract.status, row['status'])
                    new_events.append(Event(
                        user_id=1,
                        issued=now,
                        text=text,
                    ))
                    contract.status = row['status']
                    contract.date_accepted = dateAccepted
                    contract.date_completed = dateCompleted
                    contract.acceptor_id = acceptor_id
                    contract.save()
            # Contract does not exist, make a new one
            else:
                contract = Contract(
                    contract_id=contract_id,
                    issuer_char=issuer_char,
                    issuer_corp=issuer_corp,
                    assignee_id=assignee_id,
                    acceptor_id=acceptor_id,
                    start_station=station_map[int(row['start_location_id'])],
                    end_station=station_map[int(row['end_location_id'])],
                    type=type,
                    status=row['status'],
                    title=row['title'],
                    for_corp=row['for_corporation'],
                    public=(row['availability'].lower() == 'public'),
                    date_issued=dateIssued,
                    date_expired=dateExpired,
                    date_accepted=dateAccepted,
                    date_completed=dateCompleted,
                    num_days=int(row['days_to_complete']),
                    price=Decimal(row['price']),
                    reward=Decimal(row['reward']),
                    collateral=Decimal(row['collateral'] if 'collateral' in row else 0),
                    buyout=Decimal(row['buyout'] if 'buyout' in row else 0),
                    volume=Decimal(row['volume']),
                    availability=row['availability']
                )
                new_contracts.append(contract)
                # If this contract is a new contract in a non-completed state, log an event
                if contract.status in ('outstanding', 'in_progress'):
                    # if assignee_id in user_chars or assignee_id in user_corps:
                    assignee = char_map.get(assignee_id, corp_map.get(assignee_id, alliance_map.get(assignee_id)))
                    if assignee is not None:
                        text = "Contract %s was created from '%s' to '%s' with status '%s'" % (
                            contract, contract.get_issuer_name(), assignee.name, contract.status)
                        new_events.append(Event(
                            user_id=1,
                            issued=now,
                            text=text,
                        ))
        # And save the damn things
        try:
            Contract.objects.bulk_create(new_contracts)
        except:
            import sys
            print("Unexpected error:", sys.exc_info()[0])
            print(character.name)
            return False
        Event.objects.bulk_create(new_events)
        # Force the queryset to update
        # NOTE(review): QuerySet.update() with no keyword arguments — presumably
        # intended to invalidate the cached result set; confirm this does what
        # the comment claims.
        c_filter.update()
        # # Now go fetch items for each contract
        contracts_to_populate = c_filter.filter(retrieved_items=False).exclude(type='Courier').exclude(status='deleted')
        if len(contracts_to_populate) > 100:
            print('Populating Many Contracts (%d!!)! This will take a while!!' % len(contracts_to_populate))
        ttl_count = 0
        seen_contracts = []
        seen_records = set()
        new = []
        item_url = self.corp_contract_item_url if for_corp else self.char_contract_item_url
        # Work through the contracts in slices of 100, batching the item URLs.
        for i in range(0, len(contracts_to_populate), 100):
            urls = [item_url % (corp_id if for_corp else char_id, c.contract_id) for c in contracts_to_populate[i:i+100]]
            cids = dict((item_url % (corp_id if for_corp else char_id, c.contract_id), c.id) for c in contracts_to_populate[i:i+100])
            contract_item_data = self.fetch_batch_esi_urls(urls, character, headers_to_return=['status'], batch_size=1)
            for url, item_data in contract_item_data.items():
                success, data, headers = item_data
                cid = cids[url]
                if not success:
                    # A 404 means the contract has no item list; mark it seen
                    # so we stop asking for it.
                    if 'status' in headers and headers['status'] == 404:
                        seen_contracts.append(cid)
                        ttl_count += 1
                else:
                    try:
                        items_response = json.loads(data)
                    except:
                        continue
                    contract_items = []
                    for row in items_response:
                        contract_item = ContractItem(
                            contract_id=cid,
                            record_id=row['record_id'],
                            item_id=row['type_id'],
                            quantity=int(row['quantity']),
                            raw_quantity=row.get('raw_quantity', 0),
                            singleton=row['is_singleton'],
                            included=row['is_included'],
                        )
                        if row['record_id'] in seen_records:
                            continue
                        seen_records.add(row['record_id'])
                        contract_items.append(contract_item)
                        # NOTE(review): print() is given %-style arguments here,
                        # so it prints a tuple rather than formatting — confirm
                        # whether self.log_warn was intended.
                        try:
                            if contract_item.item is None:
                                print('Item not found: %d', row['type_id'])
                        except:
                            # Unknown item type: create a placeholder so the
                            # FK on ContractItem resolves.
                            self.log_error('Item not found: %d', row['type_id'])
                            new_item = Item(
                                id=row['type_id'],
                                name='**UNKNOWN**',
                                item_group_id=20,  # Mineral, just
                                portion_size=1,
                                base_price=1,
                            )
                            new_item.save()
                    new = new + contract_items
                    ttl_count += 1
                    seen_contracts.append(cid)
                # Flush to the database every ~100 completed contracts.
                if len(seen_contracts) >= 100:
                    print('Flushing %d-%d/%d contracts to DB...' % (ttl_count-len(seen_contracts), ttl_count, len(contracts_to_populate)))
                    c_filter.filter(id__in=seen_contracts).update(retrieved_items=True)
                    # Ensure we remove duplicate records
                    ContractItem.objects.filter(contract_id__in=seen_contracts).delete();
                    ContractItem.objects.filter(record_id__in=seen_records).delete()
                    ContractItem.objects.bulk_create(new)
                    new = []
                    seen_contracts = []
                    seen_records = set()
        # Final flush for whatever is left after the loop.
        if new:
            ContractItem.objects.filter(contract_id__in=seen_contracts).delete();
            ContractItem.objects.filter(record_id__in=seen_records).delete()
            ContractItem.objects.bulk_create(new)
            c_filter.filter(id__in=seen_contracts).update(retrieved_items=True)
        return True
| |
##########################################
# File: util.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompanying file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import re
# raise_if_not_shape
def raise_if_not_shape(name, A, shape):
    """Ensure the np.ndarray `A` has exactly the dimensions `shape`,
    raising a `ValueError` that names the offending argument otherwise."""
    if A.shape == shape:
        return
    raise ValueError('{}.shape != {}'.format(name, shape))
# previous_float
PARSE_FLOAT_RE = re.compile(r'([+-]*)0x1\.([\da-f]{13})p(.*)')


def previous_float(x):
    """Step `x` one representable float closer to zero, i.e. decrement the
    magnitude by one unit in the last place of the 53-bit mantissa."""
    sign, mantissa_hex, exp_str = PARSE_FLOAT_RE.match(float(x).hex().lower()).groups()
    mantissa = int(mantissa_hex, 16)
    exponent = int(exp_str)
    if mantissa > 0:
        mantissa -= 1
    else:
        # Mantissa underflow: wrap to all ones and borrow from the exponent.
        mantissa = int('f' * 13, 16)
        exponent -= 1
    return float.fromhex('{}0x1.{:013x}p{:d}'.format(sign, mantissa, exponent))
##############################################################################
"""
Author(s): Wei Chen (wchen459@umd.edu)
"""
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.utils.graph import graph_shortest_path
from sklearn.neighbors import kneighbors_graph
from scipy.sparse.csgraph import connected_components
from sklearn.manifold import Isomap
from sklearn.preprocessing import scale
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from scipy.stats import pearsonr
from sklearn.externals import joblib
import ConfigParser
def create_dir(path):
    """Create the directory `path` unless it already exists."""
    if not os.path.isdir(path):
        os.mkdir(path)
def reduce_dim(data_h, plot=False, save=False, c=None):
    """Project `data_h` onto the principal components that retain 99.5% of
    the variance; returns (low-dim data, inverse-transform callable)."""
    if plot:
        # Scree plot of the per-component explained variance.
        plt.rc("font", size=12)
        full_pca = PCA()
        full_pca.fit(data_h)
        plt.plot(range(1, data_h.shape[1]+1), full_pca.explained_variance_ratio_)
        plt.xlabel('Dimensionality')
        plt.ylabel('Explained variance ratio')
        plt.title('Scree Plot')
        plt.show()
        plt.close()
    # Keep only as many components as needed for 99.5% of the variance.
    projector = PCA(n_components=.995)
    data_l = projector.fit_transform(data_h)
    print('Reduced dimensionality: %d' % data_l.shape[1])
    if save:
        save_model(projector, 'xpca', c)
    return data_l, projector.inverse_transform
def sort_eigen(M):
    """Eigendecompose the symmetric matrix `M` and return (eigenvalues,
    eigenvectors) ordered from largest to smallest eigenvalue."""
    vals, vecs = np.linalg.eigh(M)
    order = np.argsort(vals)[::-1]
    return vals[order], vecs[:, order]
def find_gap(metrics, threshold=.99, method='difference', multiple=False, verbose=0):
    """Locate the largest gap in nonnegative `metrics` given in descending order.

    With method='percentage', returns the first index at which the cumulative
    share of the total exceeds `threshold`.  Otherwise computes successive
    drops ('difference') or ratios ('divide') and returns (index-before-gap,
    gap size); with multiple=True, arrays of candidate indices and gaps.
    """
    if method == 'percentage':
        total = np.sum(metrics)
        for i in range(len(metrics)):
            if np.sum(metrics[:i+1]) / total > threshold:
                break
        if verbose == 2:
            plt.figure()
            plt.plot(metrics, 'o-')
            plt.title('metrics')
            plt.show()
        return i
    if method == 'difference':
        drops = np.array(metrics[:-1]) - np.array(metrics[1:])
    elif method == 'divide':
        # Clamp away zeros so the ratios stay finite.
        metrics = np.clip(metrics, np.finfo(float).eps, np.inf)
        drops = np.array(metrics[:-1]) / np.array(metrics[1:])
    else:
        print('No method called %s!' % method)
        sys.exit(0)
    if multiple:
        tol = 1e-4
        candidates = []
        if drops[0] > tol:
            candidates.append(0)
        # Every local increase in the drop sequence marks another candidate.
        for i in range(len(drops) - 1):
            if drops[i+1] > drops[i]:
                candidates.append(i+1)
        arggap = np.array(candidates)
    else:
        arggap = np.argmax(drops)
    if verbose == 2:
        plt.figure()
        plt.subplot(211)
        plt.plot(metrics, 'o')
        plt.title('metrics')
        plt.subplot(212)
        plt.plot(drops, 'o')
        plt.title('gaps')
        plt.show()
    return arggap, drops[arggap]
def create_graph(X, n_neighbors, include_self=False, verbose=0):
    """Build the k-nearest-neighbor graph of `X` and return the matrix of
    shortest-path (graph geodesic) distances between all sample pairs."""
    knn = kneighbors_graph(X, n_neighbors, mode='distance', include_self=include_self)
    geodesics = graph_shortest_path(knn, directed=False)
    if verbose:
        # Show the neighborhood structure the graph was built from.
        finder = NearestNeighbors().fit(X)
        neighbor_idx = finder.kneighbors(n_neighbors=n_neighbors, return_distance=False)
        visualize_graph(X, neighbor_idx)
    return geodesics
def get_geo_dist(X, K='auto', verbose=0):
    """Geodesic distance matrix of `X`.

    With K='auto' the smallest neighborhood size yielding a single connected
    component is used and (matrix, k) is returned; otherwise only the matrix
    for K neighbors is returned.
    """
    n_samples = X.shape[0]
    if K != 'auto':
        return create_graph(X, K, verbose=verbose)
    # Grow k until the neighborhood graph becomes fully connected.
    for k in range(2, n_samples):
        G = create_graph(X, k, verbose=verbose)
        if connected_components(G, directed=False, return_labels=False) == 1:
            break
    return G, k
def get_k_range(X, verbose=0):
    """Return (k_min, k_max): the smallest k whose kNN graph is connected and
    the largest k before the undirected graph becomes overly dense."""
    n_samples = X.shape[0]
    # k_min: first k giving a single connected component.
    for k in range(1, n_samples):
        G = create_graph(X, k, include_self=False, verbose=verbose)
        if connected_components(G, directed=False, return_labels=False) == 1:
            break
    k_min = k
    # k_max: stop once the average (undirected) degree exceeds k+2.
    for k in range(k_min, n_samples):
        knn_matrix = kneighbors_graph(X, k, include_self=False).toarray()
        undirected = np.logical_or(knn_matrix, knn_matrix.T)
        n_edges = np.sum(undirected) / 2
        if 2 * n_edges / float(n_samples) > k + 2:
            break
    k_max = k - 1
    if verbose == 2:
        print('k_range: [%d, %d]' % (k_min, k_max))
    if k_max < k_min:
        print('No suitable neighborhood size!')
    return k_min, k_max
def get_candidate(X, dim, k_min, k_max, verbose=0):
    """Candidate neighborhood sizes for Isomap: every k in [k_min, k_max]
    whose reconstruction error is a strict local minimum (falling back to
    the last k tried when there is none)."""
    errs = []
    k_candidates = []
    for k in range(k_min, k_max+1):
        isomap = Isomap(n_neighbors=k, n_components=dim).fit(X)
        rec_err = isomap.reconstruction_error()
        errs.append(rec_err)
        i = k - k_min
        # k-1 is a candidate when its error dips below both neighbors.
        if i > 1 and errs[i-1] < errs[i-2] and errs[i-1] < errs[i]:
            k_candidates.append(k-1)
    if len(k_candidates) == 0:
        k_candidates.append(k)
    if verbose == 2:
        print 'k_candidates: ', k_candidates
        plt.figure()
        plt.rc("font", size=12)
        plt.plot(range(k_min, k_max+1), errs, '-o')
        plt.xlabel('Neighborhood size')
        plt.ylabel('Reconstruction error')
        plt.title('Select candidates of neighborhood size')
        plt.show()
    return k_candidates
def pick_k(X, dim, k_min=None, k_max=None, verbose=0):
    ''' Pick optimal neighborhood size for the Isomap algorithm
    Reference:
    Samko, O., Marshall, A. D., & Rosin, P. L. (2006). Selection of the optimal parameter
    value for the Isomap algorithm. Pattern Recognition Letters, 27(9), 968-979.
    '''
    if k_min is None or k_max is None:
        k_min, k_max = get_k_range(X, verbose=verbose)
    # Residual variance 1 - r^2 between graph distances and embedded distances.
    ccs = []
    k_candidates = range(k_min, k_max+1)#get_candidate(X, dim, k_min, k_max, verbose=verbose)
    for k in k_candidates:
        isomap = Isomap(n_neighbors=k, n_components=dim).fit(X)
        F = isomap.fit_transform(X)
        distF = pairwise_distances(F)
        distX = create_graph(X, k, verbose=verbose)
        cc = 1-pearsonr(distX.flatten(), distF.flatten())[0]**2
        ccs.append(cc)
    # Best k minimizes the residual variance.
    k_opt = k_candidates[np.argmin(ccs)]
    if verbose == 2:
        print 'k_opt: ', k_opt
        plt.figure()
        plt.rc("font", size=12)
        plt.plot(k_candidates, ccs, '-o')
        plt.xlabel('Neighborhood size')
        plt.ylabel('Residual variance')
        plt.title('Select optimal neighborhood size')
        plt.show()
    return k_opt
def estimate_dim(data, verbose=0):
    ''' Estimate intrinsic dimensionality of data
    data: input data
    Reference:
    "Samko, O., Marshall, A. D., & Rosin, P. L. (2006). Selection of the optimal parameter
    value for the Isomap algorithm. Pattern Recognition Letters, 27(9), 968-979."
    '''
    # Standardize by center to the mean and component wise scale to unit variance
    data = scale(data)
    # The reconstruction error will decrease as n_components is increased until n_components == intr_dim
    errs = []
    found = False
    k_min, k_max = get_k_range(data, verbose=verbose)
    for dim in range(1, data.shape[1]+1):
        k_opt = pick_k(data, dim, k_min, k_max, verbose=verbose)
        isomap = Isomap(n_neighbors=k_opt, n_components=dim).fit(data)
        err = isomap.reconstruction_error()
        #print(err)
        errs.append(err)
        # Stop once the error improvement drops below half the previous
        # improvement: the previous dimension is taken as intrinsic.
        if dim > 2 and errs[dim-2]-errs[dim-1] < .5 * (errs[dim-3]-errs[dim-2]):
            intr_dim = dim-1
            found = True
            break
    if not found:
        intr_dim = 1
    # intr_dim = find_gap(errs, method='difference', verbose=verbose)[0] + 1
    # intr_dim = find_gap(errs, method='percentage', threshold=.9, verbose=verbose) + 1
    if verbose == 2:
        plt.figure()
        plt.rc("font", size=12)
        plt.plot(range(1,dim+1), errs, '-o')
        plt.xlabel('Dimensionality')
        plt.ylabel('Reconstruction error')
        plt.title('Select intrinsic dimension')
        plt.show()
    return intr_dim
def get_singular_ratio(X_nbr, d):
    """Ratio of the singular-value energy beyond the first `d` components to
    the energy within them, for the row-centered matrix `X_nbr`."""
    centered = X_nbr - np.mean(X_nbr, axis=1).reshape(-1, 1)
    s = np.linalg.svd(centered, compute_uv=0)
    tail_energy = np.sum(s[d:] ** 2.)
    head_energy = np.sum(s[:d] ** 2.)
    return (tail_energy / head_energy) ** .5
def select_neighborhood(X, dims, k_range=None, get_full_ind=False, verbose=0):
    ''' Inspired by the Neighborhood Contraction and Neighborhood Expansion algorithms
    The selected neighbors for each sample point should reflect the local geometric structure of the manifold
    Reference:
    "Zhang, Z., Wang, J., & Zha, H. (2012). Adaptive manifold learning. IEEE Transactions
    on Pattern Analysis and Machine Intelligence, 34(2), 253-265."
    '''
    print 'Selecting neighborhood ... '
    m = X.shape[0]
    # A single int means the same target dimensionality for every sample.
    if type(dims) == int:
        dims = [dims] * m
    if k_range is None:
        k_min, k_max = get_k_range(X)
    else:
        k_min, k_max = k_range
    # G = get_geo_dist(X, verbose=verbose)[0] # geodesic distances
    # ind = np.argsort(G)[:,:k_max+1]
    neigh = NearestNeighbors().fit(X)
    ind = neigh.kneighbors(n_neighbors=k_max, return_distance=False)
    # Prepend each point's own index so column 0 is the point itself.
    ind = np.concatenate((np.arange(m).reshape(-1,1), ind), axis=1)
    nbrs = []
    # Choose eta: threshold on the singular ratio separating "flat enough"
    # neighborhoods from curved ones, placed inside the largest ratio gap.
    k0 = k_max
    r0s = []
    for j in range(m):
        X_nbr0 = X[ind[j,:k0]].T
        r0 = get_singular_ratio(X_nbr0, dims[j])
        r0s.append(r0)
    r0s.sort(reverse=True)
    j0 = find_gap(r0s, method='divide')[0]
    eta = (r0s[j0]+r0s[j0+1])/2
    # eta = 0.02
    if verbose:
        print 'eta = %f' % eta
    for i in range(m):
        ''' Neighborhood Contraction '''
        # Shrink the neighborhood until its singular ratio drops below eta;
        # if it never does, keep the size with the smallest ratio seen.
        rs = []
        for k in range(k_max, k_min-1, -1):
            X_nbr = X[ind[i,:k]].T
            r = get_singular_ratio(X_nbr, dims[i])
            rs.append(r)
            if r < eta:
                ki = k
                break
            if k == k_min:
                ki = k_max-np.argmin(rs)
        nbrs.append(ind[i,:ki])
        ''' Neighborhood Expansion '''
        # Re-admit outside neighbors that the local PCA reconstructs well.
        pca = PCA(n_components=dims[i]).fit(X[nbrs[i]])
        nbr_out = ind[i, ki:] # neighbors of x_i outside the neighborhood set by Neighborhood Contraction
        for j in nbr_out:
            theta = pca.transform(X[j].reshape(1,-1))
            err = np.linalg.norm(pca.inverse_transform(theta) - X[j]) # reconstruction error
            if err < eta * np.linalg.norm(theta):
                nbrs[i] = np.append(nbrs[i], [j])
        # print ki, len(nbrs[i])
    # print max([len(nbrs[i]) for i in range(m)])
    if verbose:
        # Visualize nearest neighbor graph
        visualize_graph(X, nbrs)
        # Visualize neighborhood selection
        if X.shape[1] > 3:
            pca = PCA(n_components=3)
            F = pca.fit_transform(X)
        else:
            F = np.zeros((X.shape[0], 3))
            F[:,:X.shape[1]] = X
        fig3d = plt.figure()
        ax3d = fig3d.add_subplot(111, projection = '3d')
        # Create cubic bounding box to simulate equal aspect ratio
        max_range = np.array([F[:,0].max()-F[:,0].min(), F[:,1].max()-F[:,1].min(), F[:,2].max()-F[:,2].min()]).max()
        Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(F[:,0].max()+F[:,0].min())
        Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(F[:,1].max()+F[:,1].min())
        Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(F[:,2].max()+F[:,2].min())
        ax3d.scatter(Xb, Yb, Zb, c='white', alpha=0)
        # Plot point sets in 3D
        plot_samples = [0, 1]
        nbr_indices = []
        for i in plot_samples:
            nbr_indices = list(set(nbr_indices) | set(nbrs[i]))
        F_ = np.delete(F, nbr_indices, axis=0)
        ax3d.scatter(F_[:,0], F_[:,1], F_[:,2], c='white')
        colors = ['b', 'g', 'y', 'r', 'c', 'm', 'y', 'k']
        from itertools import cycle
        colorcycler = cycle(colors)
        for i in plot_samples:
            color = next(colorcycler)
            ax3d.scatter(F[nbrs[i][1:],0], F[nbrs[i][1:],1], F[nbrs[i][1:],2], marker='*', c=color, s=100)
            ax3d.scatter(F[i,0], F[i,1], F[i,2], marker='x', c=color, s=100)
        plt.show()
    if get_full_ind:
        return nbrs, ind
    else:
        return nbrs
def visualize_graph(X, nbrs):
    """Scatter the samples in (at most) 3-D and draw the neighborhood edges
    of sample index 3."""
    # Reduce dimensionality
    if X.shape[1] > 3:
        pca = PCA(n_components=3)
        F = pca.fit_transform(X)
    else:
        # Pad low-dimensional data with zero coordinates for the 3-D axes.
        F = np.zeros((X.shape[0], 3))
        F[:,:X.shape[1]] = X
    m = F.shape[0]
    fig3d = plt.figure()
    ax3d = fig3d.add_subplot(111, projection = '3d')
    # Create cubic bounding box to simulate equal aspect ratio
    max_range = np.array([F[:,0].max()-F[:,0].min(), F[:,1].max()-F[:,1].min(), F[:,2].max()-F[:,2].min()]).max()
    Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(F[:,0].max()+F[:,0].min())
    Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(F[:,1].max()+F[:,1].min())
    Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(F[:,2].max()+F[:,2].min())
    ax3d.scatter(Xb, Yb, Zb, c='white', alpha=0)
    # Plot point sets in 3D
    ax3d.scatter(F[:,0], F[:,1], F[:,2], c='blue')
    # Plot edges
    # for i in range(m-1):
    #     for j in range(i+1, m):
    #         if j in nbrs[i]:
    #             line = np.vstack((F[i], F[j]))
    #             ax3d.plot(line[:,0], line[:,1], line[:,2], c='green')
    # NOTE(review): only edges from sample index 3 are drawn (the full loop
    # above is commented out) — looks like leftover debugging; confirm.
    for i in [3]:
        for j in range(i+1, m):
            if j in nbrs[i]:
                line = np.vstack((F[i], F[j]))
                ax3d.plot(line[:,0], line[:,1], line[:,2], c='green')
    plt.show()
def get_fname(mname, c, directory, extension='pkl'):
    """Compose the path for a saved model/array from the global config
    (data source and noise level), the model name `mname` and an optional
    cluster id `c`."""
    config = ConfigParser.ConfigParser()
    config.read('config.ini')
    source = config.get('Global', 'source')
    noise_scale = config.getfloat('Global', 'noise_scale')
    if source == 'sf':
        # Superformula sources encode cluster count and nonlinearity in the name.
        alpha = config.getfloat('Superformula', 'nonlinearity')
        beta = config.getint('Superformula', 'n_clusters')
        sname = source + '-' + str(beta) + '-' + str(alpha)
    elif source.startswith(('rw-', 'sf-')):
        sname = source
    if c is None:
        return '%s/%s_%.4f_%s.%s' % (directory, sname, noise_scale, mname, extension)
    return '%s/%s_%.4f_%s_%d.%s' % (directory, sname, noise_scale, mname, c, extension)
def save_model(model, mname, c=None, directory='./trained_models/'):
    """Persist a fitted model under the configured naming scheme."""
    target = get_fname(mname, c, directory)
    joblib.dump(model, target, compress=9)
    print('Model ' + mname + ' saved!')
def load_model(mname, c=None, directory='./trained_models/'):
    """Load a previously saved model for the current configuration."""
    return joblib.load(get_fname(mname, c, directory))
def save_array(array, dname, c=None, directory='./trained_models/'):
    """Persist a numpy array under the configured naming scheme."""
    target = get_fname(dname, c, directory, extension='npy')
    np.save(target, array)
    print('Model ' + dname + ' saved!')
def load_array(dname, c=None, directory='./trained_models/'):
    """Load a previously saved numpy array for the current configuration."""
    return np.load(get_fname(dname, c, directory, extension='npy'))
def gen_samples(N, d, bounds):
    """Draw `N` uniform random samples from the d-dimensional box whose lower
    and upper corners are bounds[0] and bounds[1]."""
    span = (bounds[1, :] - bounds[0, :]).reshape(1, d)
    return np.random.rand(N, d) * span + bounds[0, :].reshape(1, d)
| |
# -*- coding: utf-8 -*-
"""
we test .agg behavior / note that .apply is tested
generally in test_groupby.py
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (date_range, MultiIndex, DataFrame,
Series, Index, bdate_range, concat)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import SpecificationError, DataError
from pandas.compat import OrderedDict
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
class TestGroupByAggregate(object):
def setup_method(self, method):
    """Build the shared fixtures used across the groupby-aggregation tests."""
    # Time-series fixtures from pandas' testing helpers.
    self.ts = tm.makeTimeSeries()
    self.seriesd = tm.getSeriesData()
    self.tsd = tm.getTimeSeriesData()
    self.frame = DataFrame(self.seriesd)
    self.tsframe = DataFrame(self.tsd)
    # Small mixed string/float frame with duplicate group keys.
    self.df = DataFrame(
        {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
         'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
         'C': np.random.randn(8),
         'D': np.random.randn(8)})
    # Same layout but with a float32 column to exercise dtype handling.
    self.df_mixed_floats = DataFrame(
        {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
         'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
         'C': np.random.randn(8),
         'D': np.array(
             np.random.randn(8), dtype='float32')})
    # Two-level MultiIndex frame.
    # NOTE(review): `labels=` is the legacy MultiIndex keyword (renamed to
    # `codes=` in later pandas) — this file targets an older pandas version.
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
                                                             'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['first', 'second'])
    self.mframe = DataFrame(np.random.randn(10, 3), index=index,
                            columns=['A', 'B', 'C'])
    # Three string key columns plus three float columns.
    self.three_group = DataFrame(
        {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
               'foo', 'foo', 'foo'],
         'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
               'two', 'two', 'one'],
         'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
               'dull', 'shiny', 'shiny', 'shiny'],
         'D': np.random.randn(11),
         'E': np.random.randn(11),
         'F': np.random.randn(11)})
def test_agg_api(self):
    # GH 6337
    # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
    # different api for agg when passed custom function with mixed frame
    frame = DataFrame({'data1': np.random.randn(5),
                       'data2': np.random.randn(5),
                       'key1': ['a', 'a', 'b', 'b', 'a'],
                       'key2': ['one', 'two', 'one', 'two', 'one']})
    by_key1 = frame.groupby('key1')

    def spread(arr):
        return arr.max() - arr.min()

    # agg with a list of functions vs agg with a bare function must agree
    # once the list variant's column MultiIndex is flattened.
    expected = by_key1.agg([spread])
    expected.columns = ['data1', 'data2']
    result = by_key1.agg(spread)
    assert_frame_equal(result, expected)
def test_agg_regression1(self):
    # Grouping by (year, month) callables: agg(np.mean) must equal .mean().
    by_year_month = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
    assert_frame_equal(by_year_month.agg(np.mean), by_year_month.mean())
def test_agg_datetimes_mixed(self):
    """Grouping by string dates and by datetime.date objects must yield the
    same number of groups."""
    data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
    # df1: dates as plain strings (with one None).
    df1 = DataFrame({'key': [x[0] for x in data],
                     'date': [x[1] for x in data],
                     'value': [x[2] for x in data]})
    # df2: the same rows with dates parsed to datetime.date objects.
    data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
             else None, row[2]] for row in data]
    df2 = DataFrame({'key': [x[0] for x in data],
                     'date': [x[1] for x in data],
                     'value': [x[2] for x in data]})
    df1['weights'] = df1['value'] / df1['value'].sum()
    gb1 = df1.groupby('date').aggregate(np.sum)
    # NOTE(review): df2's weights are computed from df1's values — the two
    # frames hold identical numbers so the result is the same, but confirm
    # this isn't a typo for df2.
    df2['weights'] = df1['value'] / df1['value'].sum()
    gb2 = df2.groupby('date').aggregate(np.sum)
    assert (len(gb1) == len(gb2))
    def test_agg_period_index(self):
        """Grouping a PeriodIndex frame by level keeps a PeriodIndex result,
        and grouping by the index's month attribute is iterable (GH 3579)."""
        from pandas import period_range, PeriodIndex
        prng = period_range('2012-1-1', freq='M', periods=3)
        df = DataFrame(np.random.randn(3, 2), index=prng)
        rs = df.groupby(level=0).sum()
        assert isinstance(rs.index, PeriodIndex)
        # GH 3579
        index = period_range(start='1999-01', periods=5, freq='M')
        s1 = Series(np.random.rand(len(index)), index=index)
        s2 = Series(np.random.rand(len(index)), index=index)
        series = [('s1', s1), ('s2', s2)]
        df = DataFrame.from_items(series)
        grouped = df.groupby(df.index.month)
        # only checks that iterating the groups does not raise
        list(grouped)
    def test_agg_dict_parameter_cast_result_dtypes(self):
        """first/last and their agg('first')/agg({'col': 'first'}) forms must
        agree and keep the datetime64 dtype (GH 12821)."""
        # GH 12821
        df = DataFrame(
            {'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
             'time': date_range('1/1/2011', periods=8, freq='H')})
        # introduce NaT so first/last actually have to skip missing values
        df.loc[[0, 1, 2, 5], 'time'] = None
        # test for `first` function
        exp = df.loc[[0, 3, 4, 6]].set_index('class')
        grouped = df.groupby('class')
        assert_frame_equal(grouped.first(), exp)
        assert_frame_equal(grouped.agg('first'), exp)
        assert_frame_equal(grouped.agg({'time': 'first'}), exp)
        assert_series_equal(grouped.time.first(), exp['time'])
        assert_series_equal(grouped.time.agg('first'), exp['time'])
        # test for `last` function
        exp = df.loc[[0, 3, 4, 7]].set_index('class')
        grouped = df.groupby('class')
        assert_frame_equal(grouped.last(), exp)
        assert_frame_equal(grouped.agg('last'), exp)
        assert_frame_equal(grouped.agg({'time': 'last'}), exp)
        assert_series_equal(grouped.time.last(), exp['time'])
        assert_series_equal(grouped.time.agg('last'), exp['time'])
        # count: len/size include NaT rows, count() excludes them
        exp = pd.Series([2, 2, 2, 2],
                        index=Index(list('ABCD'), name='class'),
                        name='time')
        assert_series_equal(grouped.time.agg(len), exp)
        assert_series_equal(grouped.time.size(), exp)
        exp = pd.Series([0, 1, 1, 2],
                        index=Index(list('ABCD'), name='class'),
                        name='time')
        assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes(self):
# similar to GH12821
# xref #11444
u = [datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
assert_series_equal(result, expected)
    def test_agg_must_agg(self):
        """agg must reduce each group to a scalar; non-reducing functions
        (returning a Series/Index) raise."""
        grouped = self.df.groupby('A')['C']
        pytest.raises(Exception, grouped.agg, lambda x: x.describe())
        pytest.raises(Exception, grouped.agg, lambda x: x.index[:2])
    def test_agg_ser_multi_key(self):
        """Series.groupby([key1, key2]).aggregate(f) matches the frame-level
        two-key sum on the same column."""
        # TODO(wesm): unused
        ser = self.df.C  # noqa
        f = lambda x: x.sum()
        results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
        expected = self.df.groupby(['A', 'B']).sum()['C']
        assert_series_equal(results, expected)
    def test_agg_apply_corner(self):
        """Grouping on all-NaN keys yields empty results with a float64
        index, for Series and DataFrame alike."""
        # nothing to group, all NA
        grouped = self.ts.groupby(self.ts * np.nan)
        assert self.ts.dtype == np.float64
        # groupby float64 values results in Float64Index
        exp = Series([], dtype=np.float64, index=pd.Index(
            [], dtype=np.float64))
        assert_series_equal(grouped.sum(), exp)
        assert_series_equal(grouped.agg(np.sum), exp)
        assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
        # DataFrame
        grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
        exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
                           index=pd.Index([], dtype=np.float64))
        assert_frame_equal(grouped.sum(), exp_df, check_names=False)
        assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
        # apply on an empty grouping keeps no columns
        assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
                           check_names=False)
    def test_agg_grouping_is_list_tuple(self):
        """A Grouping whose grouper is a plain list or tuple (not an
        array/Index) must still aggregate correctly."""
        from pandas.core.groupby import Grouping
        df = tm.makeTimeDataFrame()
        grouped = df.groupby(lambda x: x.year)
        grouper = grouped.grouper.groupings[0].grouper
        # swap the internal grouper for a list ...
        grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
        result = grouped.agg(np.mean)
        expected = grouped.mean()
        tm.assert_frame_equal(result, expected)
        # ... and for a tuple
        grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
        result = grouped.agg(np.mean)
        expected = grouped.mean()
        tm.assert_frame_equal(result, expected)
def test_aggregate_float64_no_int64(self):
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5],
"c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
    def test_aggregate_api_consistency(self):
        """List, dict, and column-selected agg specs must all line up with
        the per-column mean/sum results (GH 9052)."""
        # GH 9052
        # make sure that the aggregates via dict
        # are consistent
        df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                              'foo', 'bar', 'foo', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': np.random.randn(8) + 1.0,
                        'D': np.arange(8)})
        grouped = df.groupby(['A', 'B'])
        c_mean = grouped['C'].mean()
        c_sum = grouped['C'].sum()
        d_mean = grouped['D'].mean()
        d_sum = grouped['D'].sum()
        # list of names on a single column
        result = grouped['D'].agg(['sum', 'mean'])
        expected = pd.concat([d_sum, d_mean],
                             axis=1)
        expected.columns = ['sum', 'mean']
        assert_frame_equal(result, expected, check_like=True)
        # list of functions on the whole frame -> (column, func) MultiIndex
        result = grouped.agg([np.sum, np.mean])
        expected = pd.concat([c_sum,
                              c_mean,
                              d_sum,
                              d_mean],
                             axis=1)
        expected.columns = MultiIndex.from_product([['C', 'D'],
                                                    ['sum', 'mean']])
        assert_frame_equal(result, expected, check_like=True)
        # column selection preserves the requested column order
        result = grouped[['D', 'C']].agg([np.sum, np.mean])
        expected = pd.concat([d_sum,
                              d_mean,
                              c_sum,
                              c_mean],
                             axis=1)
        expected.columns = MultiIndex.from_product([['D', 'C'],
                                                    ['sum', 'mean']])
        assert_frame_equal(result, expected, check_like=True)
        # dict of column -> single function name
        result = grouped.agg({'C': 'mean', 'D': 'sum'})
        expected = pd.concat([d_sum,
                              c_mean],
                             axis=1)
        assert_frame_equal(result, expected, check_like=True)
        # dict of column -> list of function names
        result = grouped.agg({'C': ['mean', 'sum'],
                              'D': ['mean', 'sum']})
        expected = pd.concat([c_mean,
                              c_sum,
                              d_mean,
                              d_sum],
                             axis=1)
        expected.columns = MultiIndex.from_product([['C', 'D'],
                                                    ['mean', 'sum']])
        # renaming through a dict on selected columns is deprecated
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = grouped[['D', 'C']].agg({'r': np.sum,
                                              'r2': np.mean})
        expected = pd.concat([d_sum,
                              c_sum,
                              d_mean,
                              c_mean],
                             axis=1)
        expected.columns = MultiIndex.from_product([['r', 'r2'],
                                                    ['D', 'C']])
        assert_frame_equal(result, expected, check_like=True)
    def test_agg_dict_renaming_deprecation(self):
        """dict-of-dict (and Series dict) renaming through agg emits a
        FutureWarning (GH 15931)."""
        # 15931
        df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
                           'B': range(5),
                           'C': range(5)})
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False) as w:
            df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
                                 'C': {'bar': ['count', 'min']}})
            assert "using a dict with renaming" in str(w[0].message)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
        with tm.assert_produces_warning(FutureWarning) as w:
            df.groupby('A').B.agg({'foo': 'count'})
            assert "using a dict on a Series for aggregation" in str(
                w[0].message)
    def test_agg_compat(self):
        """Renaming dicts applied to a single selected column warn but still
        compute the expected sums/stds (GH 12334)."""
        # GH 12334
        df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                              'foo', 'bar', 'foo', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': np.random.randn(8) + 1.0,
                        'D': np.arange(8)})
        g = df.groupby(['A', 'B'])
        expected = pd.concat([g['D'].sum(),
                              g['D'].std()],
                             axis=1)
        expected.columns = MultiIndex.from_tuples([('C', 'sum'),
                                                   ('C', 'std')])
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = g['D'].agg({'C': ['sum', 'std']})
        assert_frame_equal(result, expected, check_like=True)
        expected = pd.concat([g['D'].sum(),
                              g['D'].std()],
                             axis=1)
        expected.columns = ['C', 'D']
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = g['D'].agg({'C': 'sum', 'D': 'std'})
        assert_frame_equal(result, expected, check_like=True)
    def test_agg_nested_dicts(self):
        """Top-level renamer dicts raise SpecificationError; per-column
        renamer dicts only warn (deprecated)."""
        # API change for disallowing these types of nested dicts
        df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                              'foo', 'bar', 'foo', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': np.random.randn(8) + 1.0,
                        'D': np.arange(8)})
        g = df.groupby(['A', 'B'])
        def f():
            g.aggregate({'r1': {'C': ['mean', 'sum']},
                         'r2': {'D': ['mean', 'sum']}})
        pytest.raises(SpecificationError, f)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = g.agg({'C': {'ra': ['mean', 'std']},
                            'D': {'rb': ['mean', 'std']}})
        expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(),
                              g['D'].std()], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
            'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
        assert_frame_equal(result, expected, check_like=True)
        # same name as the original column
        # GH9052
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
        expected = expected.rename(columns={'result1': 'D'})
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = g['D'].agg({'D': np.sum, 'result2': np.mean})
        assert_frame_equal(result, expected, check_like=True)
def test_agg_python_multiindex(self):
grouped = self.mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
    def test_aggregate_str_func(self):
        """String function names ('std', 'var', ...) dispatch to the matching
        groupby methods, for a Series, a frame, and a dict spec."""
        def _check_results(grouped):
            # single series
            result = grouped['A'].agg('std')
            expected = grouped['A'].std()
            assert_series_equal(result, expected)
            # group frame by function name
            result = grouped.aggregate('var')
            expected = grouped.var()
            assert_frame_equal(result, expected)
            # group frame by function dict
            result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'],
                                              ['C', 'mean'], ['D', 'sem']]))
            expected = DataFrame(OrderedDict([['A', grouped['A'].var(
            )], ['B', grouped['B'].std()], ['C', grouped['C'].mean()],
                ['D', grouped['D'].sem()]]))
            assert_frame_equal(result, expected)
        # exercise both single- and multi-key function groupings
        by_weekday = self.tsframe.groupby(lambda x: x.weekday())
        _check_results(by_weekday)
        by_mwkday = self.tsframe.groupby([lambda x: x.month,
                                          lambda x: x.weekday()])
        _check_results(by_mwkday)
    def test_aggregate_item_by_item(self):
        """Non-cython agg functions are applied column by column; empty
        frames aggregate to an empty result."""
        # NOTE(review): 'df' (with the extra object column 'E') is built but
        # self.df is what gets grouped below — presumably a leftover of the
        # pre-0.11 API check kept in the comment; confirm intent.
        df = self.df.copy()
        df['E'] = ['a'] * len(self.df)
        grouped = self.df.groupby('A')
        # API change in 0.11
        # def aggfun(ser):
        #     return len(ser + 'a')
        # result = grouped.agg(aggfun)
        # assert len(result.columns) == 1
        aggfun = lambda ser: ser.size
        result = grouped.agg(aggfun)
        foo = (self.df.A == 'foo').sum()
        bar = (self.df.A == 'bar').sum()
        K = len(result.columns)
        # GH5782
        # odd comparisons can result here, so cast to make easy
        exp = pd.Series(np.array([foo] * K), index=list('BCD'),
                        dtype=np.float64, name='foo')
        tm.assert_series_equal(result.xs('foo'), exp)
        exp = pd.Series(np.array([bar] * K), index=list('BCD'),
                        dtype=np.float64, name='bar')
        tm.assert_almost_equal(result.xs('bar'), exp)
        def aggfun(ser):
            return ser.size
        # aggregating an empty frame returns an empty DataFrame
        result = DataFrame().groupby(self.df.A).agg(aggfun)
        assert isinstance(result, DataFrame)
        assert len(result) == 0
    def test_agg_item_by_item_raise_typeerror(self):
        """A TypeError raised inside the agg function propagates to the
        caller instead of being swallowed."""
        from numpy.random import randint
        df = DataFrame(randint(10, size=(20, 10)))
        def raiseException(df):
            # debug output before failing, so the offending frame is visible
            pprint_thing('----------------------------------------')
            pprint_thing(df.to_string())
            raise TypeError
        pytest.raises(TypeError, df.groupby(0).agg, raiseException)
    def test_series_agg_multikey(self):
        """Series agg(np.sum) over a two-function grouping matches .sum()."""
        ts = tm.makeTimeSeries()
        grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
        result = grouped.agg(np.sum)
        expected = grouped.sum()
        assert_series_equal(result, expected)
    def test_series_agg_multi_pure_python(self):
        """A pure-python agg function returning a constant matches the
        equivalent lambda on a multi-key grouping."""
        data = DataFrame(
            {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
                   'foo', 'foo', 'foo'],
             'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
                   'two', 'two', 'one'],
             'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
                   'dull', 'shiny', 'shiny', 'shiny'],
             'D': np.random.randn(11),
             'E': np.random.randn(11),
             'F': np.random.randn(11)})
        def bad(x):
            # NOTE(review): relies on each group's .base being non-empty —
            # presumably asserting the group data is a view; confirm.
            assert (len(x.base) > 0)
            return 'foo'
        result = data.groupby(['A', 'B']).agg(bad)
        expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
        assert_frame_equal(result, expected)
    def test_cythonized_aggers(self):
        """Each cython-implemented aggregation must match the per-group
        python computation, for single- and multi-column groupings."""
        data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],
                'B': ['A', 'B'] * 6,
                'C': np.random.randn(12)}
        df = DataFrame(data)
        # sprinkle NaNs into the value column so skipping is exercised
        df.loc[2:10:2, 'C'] = nan
        def _testit(name):
            op = lambda x: getattr(x, name)()
            # single column
            grouped = df.drop(['B'], axis=1).groupby('A')
            exp = {}
            for cat, group in grouped:
                exp[cat] = op(group['C'])
            exp = DataFrame({'C': exp})
            exp.index.name = 'A'
            result = op(grouped)
            assert_frame_equal(result, exp)
            # multiple columns
            grouped = df.groupby(['A', 'B'])
            expd = {}
            for (cat1, cat2), group in grouped:
                expd.setdefault(cat1, {})[cat2] = op(group['C'])
            exp = DataFrame(expd).T.stack(dropna=False)
            exp.index.names = ['A', 'B']
            exp.name = 'C'
            result = op(grouped)['C']
            if not tm._incompat_bottleneck_version(name):
                assert_series_equal(result, exp)
        _testit('count')
        _testit('sum')
        _testit('std')
        _testit('var')
        _testit('sem')
        _testit('mean')
        _testit('median')
        _testit('prod')
        _testit('min')
        _testit('max')
def test_cython_agg_boolean(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': np.random.randint(0, 2, 50).astype('bool')})
result = frame.groupby('a')['b'].mean()
expected = frame.groupby('a')['b'].agg(np.mean)
assert_series_equal(result, expected)
    def test_cython_agg_nothing_to_agg(self):
        """mean over a selection containing only object columns raises
        DataError, via both selection styles."""
        frame = DataFrame({'a': np.random.randint(0, 5, 50),
                           'b': ['foo', 'bar'] * 25})
        pytest.raises(DataError, frame.groupby('a')['b'].mean)
        frame = DataFrame({'a': np.random.randint(0, 5, 50),
                           'b': ['foo', 'bar'] * 25})
        pytest.raises(DataError, frame[['b']].groupby(frame['a']).mean)
    def test_cython_agg_nothing_to_agg_with_dates(self):
        """mean over a datetime column grouped by an object key raises the
        'No numeric types to aggregate' DataError."""
        frame = DataFrame({'a': np.random.randint(0, 5, 50),
                           'b': ['foo', 'bar'] * 25,
                           'dates': pd.date_range('now', periods=50,
                                                  freq='T')})
        with tm.assert_raises_regex(DataError,
                                    "No numeric types to aggregate"):
            frame.groupby('b').dates.mean()
    def test_cython_agg_frame_columns(self):
        """Repeated axis='columns' groupby means must keep working (#2113)."""
        # #2113
        df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]})
        # NOTE(review): the identical call is repeated on purpose —
        # presumably exercising internal state reuse between calls; confirm
        # before simplifying.
        df.groupby(level=0, axis='columns').mean()
        df.groupby(level=0, axis='columns').mean()
        df.groupby(level=0, axis='columns').mean()
        df.groupby(level=0, axis='columns').mean()
def test_cython_agg_return_dict(self):
# GH 16741
ts = self.df.groupby('A')['B'].agg(
lambda x: x.value_counts().to_dict())
expected = Series([{'two': 1, 'one': 1, 'three': 1},
{'two': 2, 'one': 2, 'three': 1}],
index=Index(['bar', 'foo'], name='A'),
name='B')
assert_series_equal(ts, expected)
def test_cython_fail_agg(self):
dr = bdate_range('1/1/2000', periods=50)
ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr)
grouped = ts.groupby(lambda x: x.month)
summed = grouped.sum()
expected = grouped.agg(np.sum)
assert_series_equal(summed, expected)
def test_agg_consistency(self):
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except:
return np.nan
import datetime as dt
df = DataFrame({'col1': [1, 2, 3, 4],
'col2': [10, 25, 26, 31],
'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
assert_frame_equal(result, expected)
    def test_wrap_agg_out(self):
        """Columns whose agg function raises TypeError are dropped from the
        result, matching aggregation over the remaining columns only."""
        grouped = self.three_group.groupby(['A', 'B'])
        def func(ser):
            # refuse object columns so they get excluded
            if ser.dtype == np.object:
                raise TypeError
            else:
                return ser.sum()
        result = grouped.aggregate(func)
        exp_grouped = self.three_group.loc[:, self.three_group.columns != 'C']
        expected = exp_grouped.groupby(['A', 'B']).aggregate(func)
        assert_frame_equal(result, expected)
def test_agg_multiple_functions_maintain_order(self):
# GH #610
funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]
result = self.df.groupby('A')['C'].agg(funcs)
exp_cols = Index(['mean', 'max', 'min'])
tm.assert_index_equal(result.columns, exp_cols)
    def test_multiple_functions_tuples_and_non_tuples(self):
        """Mixing ('name', func) tuples with bare function names is
        equivalent to an all-tuples spec (#1359)."""
        # #1359
        funcs = [('foo', 'mean'), 'std']
        ex_funcs = [('foo', 'mean'), ('std', 'std')]
        result = self.df.groupby('A')['C'].agg(funcs)
        expected = self.df.groupby('A')['C'].agg(ex_funcs)
        assert_frame_equal(result, expected)
        result = self.df.groupby('A').agg(funcs)
        expected = self.df.groupby('A').agg(ex_funcs)
        assert_frame_equal(result, expected)
    def test_agg_multiple_functions_too_many_lambdas(self):
        """Two unnamed lambdas in one agg list produce colliding column
        names and raise SpecificationError."""
        grouped = self.df.groupby('A')
        funcs = ['mean', lambda x: x.mean(), lambda x: x.std()]
        pytest.raises(SpecificationError, grouped.agg, funcs)
    def test_more_flexible_frame_multi_function(self):
        """Dicts mapping columns to one function or lists of functions must
        match the concat of per-function aggregations."""
        grouped = self.df.groupby('A')
        exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]]))
        exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]]))
        expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1)
        expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
        d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]])
        result = grouped.aggregate(d)
        assert_frame_equal(result, expected)
        # be careful
        result = grouped.aggregate(OrderedDict([['C', np.mean],
                                                ['D', [np.mean, np.std]]]))
        expected = grouped.aggregate(OrderedDict([['C', np.mean],
                                                  ['D', [np.mean, np.std]]]))
        assert_frame_equal(result, expected)
        def foo(x):
            return np.mean(x)
        def bar(x):
            return np.std(x, ddof=1)
        # this uses column selection & renaming
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            d = OrderedDict([['C', np.mean], ['D', OrderedDict(
                [['foo', np.mean], ['bar', np.std]])]])
            result = grouped.aggregate(d)
            d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])
            expected = grouped.aggregate(d)
            assert_frame_equal(result, expected)
    def test_multi_function_flexible_mix(self):
        """Mixed dict specs — renaming dict plus a string, a list, or a
        renaming dict for the other column — must all agree (GH #1268)."""
        # GH #1268
        grouped = self.df.groupby('A')
        d = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
            'bar', 'std'
        ]])], ['D', 'sum']])
        # this uses column selection & renaming
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = grouped.aggregate(d)
        d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
            'bar', 'std'
        ]])], ['D', ['sum']]])
        # this uses column selection & renaming
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result2 = grouped.aggregate(d2)
        d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
            'bar', 'std'
        ]])], ['D', {'sum': 'sum'}]])
        # this uses column selection & renaming
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            expected = grouped.aggregate(d3)
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
def test_agg_callables(self):
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum, np.sum, lambda x: sum(x), lambda x: x.sum(),
partial(sum), fn_class()]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
assert_frame_equal(result, expected)
    def test__cython_agg_general(self):
        """The internal _cython_agg_general(op) must match agg with the
        equivalent python function for every supported op."""
        ops = [('mean', np.mean),
               ('median', np.median),
               ('var', np.var),
               ('add', np.sum),
               ('prod', np.prod),
               ('min', np.min),
               ('max', np.max),
               ('first', lambda x: x.iloc[0]),
               ('last', lambda x: x.iloc[-1]), ]
        df = DataFrame(np.random.randn(1000))
        labels = np.random.randint(0, 50, size=1000).astype(float)
        for op, targop in ops:
            result = df.groupby(labels)._cython_agg_general(op)
            expected = df.groupby(labels).agg(targop)
            try:
                tm.assert_frame_equal(result, expected)
            except BaseException as exc:
                # tag the failing op onto the exception for easier triage
                exc.args += ('operation: %s' % op, )
                raise
    def test_cython_agg_empty_buckets(self):
        """Cython aggregations over pd.cut buckets (many of them empty) must
        match the python fallback, yielding NaN for empty buckets."""
        ops = [('mean', np.mean),
               ('median', lambda x: np.median(x) if len(x) > 0 else np.nan),
               ('var', lambda x: np.var(x, ddof=1)),
               ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan),
               ('prod', np.prod),
               ('min', np.min),
               ('max', np.max), ]
        df = pd.DataFrame([11, 12, 13])
        grps = range(0, 55, 5)
        for op, targop in ops:
            result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op)
            expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x))
            try:
                tm.assert_frame_equal(result, expected)
            except BaseException as exc:
                # tag the failing op onto the exception for easier triage
                exc.args += ('operation: %s' % op,)
                raise
    def test_agg_over_numpy_arrays(self):
        """agg(sum) over an object column holding numpy arrays must sum the
        arrays elementwise per group (GH 3788)."""
        # GH 3788
        df = pd.DataFrame([[1, np.array([10, 20, 30])],
                           [1, np.array([40, 50, 60])],
                           [2, np.array([20, 30, 40])]],
                          columns=['category', 'arraydata'])
        result = df.groupby('category').agg(sum)
        expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
        expected_index = pd.Index([1, 2], name='category')
        expected_column = ['arraydata']
        expected = pd.DataFrame(expected_data,
                                index=expected_index,
                                columns=expected_column)
        assert_frame_equal(result, expected)
    def test_agg_timezone_round_trip(self):
        """Aggregations on tz-aware timestamps must not drop or shift the
        timezone (GH 15426)."""
        # GH 15426
        ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
        df = pd.DataFrame({'a': 1, 'b': [ts + timedelta(minutes=nn)
                                         for nn in range(10)]})
        # min via numpy func, via lambda, and via the method must all
        # return the original tz-aware timestamp
        result1 = df.groupby('a')['b'].agg(np.min).iloc[0]
        result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0]
        result3 = df.groupby('a')['b'].min().iloc[0]
        assert result1 == ts
        assert result2 == ts
        assert result3 == ts
        dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific')
                 for i in range(1, 5)]
        df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates})
        grouped = df.groupby('A')
        # selection-style accessors also preserve the timezone
        ts = df['B'].iloc[0]
        assert ts == grouped.nth(0)['B'].iloc[0]
        assert ts == grouped.head(1)['B'].iloc[0]
        assert ts == grouped.first()['B'].iloc[0]
        assert ts == grouped.apply(lambda x: x.iloc[0])[0]
        ts = df['B'].iloc[2]
        assert ts == grouped.last()['B'].iloc[0]
        assert ts == grouped.apply(lambda x: x.iloc[-1])[0]
    def test_sum_uint64_overflow(self):
        """Sums beyond int64 range must be carried out in uint64 without
        overflowing (gh-14758)."""
        # see gh-14758
        # Convert to uint64 and don't overflow
        df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
                          dtype=object) + 9223372036854775807
        index = pd.Index([9223372036854775808, 9223372036854775810,
                          9223372036854775812], dtype=np.uint64)
        expected = pd.DataFrame({1: [9223372036854775809,
                                     9223372036854775811,
                                     9223372036854775813]}, index=index)
        expected.index.name = 0
        result = df.groupby(0).sum()
        tm.assert_frame_equal(result, expected)
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" test byte codecs """
from __future__ import print_function
import numpy as np
import unittest
import faiss
from common_faiss_tests import get_dataset_2
from faiss.contrib.datasets import SyntheticDataset
from faiss.contrib.inspect_tools import get_additive_quantizer_codebooks
class TestEncodeDecode(unittest.TestCase):
    """sa_encode/sa_decode must stabilize after one round trip."""
    def do_encode_twice(self, factory_key):
        """Encode, decode, re-encode: the second code set must equal the
        first (nearly, for IVF), and decoding it must be stable."""
        d = 96
        nb = 1000
        nq = 0
        nt = 2000
        xt, x, _ = get_dataset_2(d, nt, nb, nq)
        assert x.size > 0
        codec = faiss.index_factory(d, factory_key)
        codec.train(xt)
        codes = codec.sa_encode(x)
        x2 = codec.sa_decode(codes)
        codes2 = codec.sa_encode(x2)
        if 'IVF' not in factory_key:
            self.assertTrue(np.all(codes == codes2))
        else:
            # some rows are not reconstructed exactly because they
            # flip into another quantization cell
            nrowdiff = (codes != codes2).any(axis=1).sum()
            self.assertTrue(nrowdiff < 10)
        x3 = codec.sa_decode(codes2)
        if 'IVF' not in factory_key:
            self.assertTrue(np.allclose(x2, x3))
        else:
            # tolerate the few cell-flipped rows: ignore the 10 largest
            # per-row differences
            diffs = np.abs(x2 - x3).sum(axis=1)
            avg = np.abs(x2).sum(axis=1).mean()
            diffs.sort()
            assert diffs[-10] < avg * 1e-5
    def test_SQ8(self):
        self.do_encode_twice('SQ8')
    def test_IVFSQ8(self):
        self.do_encode_twice('IVF256,SQ8')
    def test_PCAIVFSQ8(self):
        self.do_encode_twice('PCAR32,IVF256,SQ8')
    def test_PQ6x8(self):
        self.do_encode_twice('PQ6np')
    def test_PQ6x6(self):
        self.do_encode_twice('PQ6x6np')
    def test_IVFPQ6x8np(self):
        self.do_encode_twice('IVF512,PQ6np')
    def test_LSH(self):
        self.do_encode_twice('LSHrt')
class TestIndexEquiv(unittest.TestCase):
    """Two factory strings sharing transplanted trained state must encode
    and reconstruct identically, also after (de)serialization."""
    def do_test(self, key1, key2):
        d = 96
        nb = 1000
        nq = 0
        nt = 2000
        xt, x, _ = get_dataset_2(d, nt, nb, nq)
        codec_ref = faiss.index_factory(d, key1)
        codec_ref.train(xt)
        code_ref = codec_ref.sa_encode(x)
        x_recons_ref = codec_ref.sa_decode(code_ref)
        codec_new = faiss.index_factory(d, key2)
        # share the trained PQ from the reference codec
        codec_new.pq = codec_ref.pq
        # replace quantizer, avoiding mem leak
        oldq = codec_new.q1.quantizer
        oldq.this.own()
        codec_new.q1.own_fields = False
        codec_new.q1.quantizer = codec_ref.quantizer
        codec_new.is_trained = True
        code_new = codec_new.sa_encode(x)
        x_recons_new = codec_new.sa_decode(code_new)
        self.assertTrue(np.all(code_new == code_ref))
        self.assertTrue(np.all(x_recons_new == x_recons_ref))
        # round-trip through serialization and re-check
        codec_new_2 = faiss.deserialize_index(
            faiss.serialize_index(codec_new))
        code_new = codec_new_2.sa_encode(x)
        x_recons_new = codec_new_2.sa_decode(code_new)
        self.assertTrue(np.all(code_new == code_ref))
        self.assertTrue(np.all(x_recons_new == x_recons_ref))
    def test_IVFPQ(self):
        self.do_test("IVF512,PQ6np", "Residual512,PQ6")
    def test_IMI(self):
        self.do_test("IMI2x5,PQ6np", "Residual2x5,PQ6")
class TestAccuracy(unittest.TestCase):
    """ comparative accuracy of a few types of indexes """
    def compare_accuracy(self, lowac, highac, max_errs=(1e10, 1e10)):
        """Reconstruction error of `lowac` must exceed `highac`'s, and both
        must stay under the respective `max_errs` ceilings."""
        d = 96
        nb = 1000
        nq = 0
        nt = 2000
        xt, x, _ = get_dataset_2(d, nt, nb, nq)
        errs = []
        for factory_string in lowac, highac:
            codec = faiss.index_factory(d, factory_string)
            print('sa codec: code size %d' % codec.sa_code_size())
            codec.train(xt)
            codes = codec.sa_encode(x)
            x2 = codec.sa_decode(codes)
            # squared reconstruction error over the whole database
            err = ((x - x2) ** 2).sum()
            errs.append(err)
        print(errs)
        self.assertGreater(errs[0], errs[1])
        self.assertGreater(max_errs[0], errs[0])
        self.assertGreater(max_errs[1], errs[1])
        # just a small IndexLattice I/O test
        if 'Lattice' in highac:
            codec2 = faiss.deserialize_index(
                faiss.serialize_index(codec))
            codes = codec2.sa_encode(x)
            x3 = codec2.sa_decode(codes)
            self.assertTrue(np.all(x2 == x3))
    def test_SQ(self):
        self.compare_accuracy('SQ4', 'SQ8')
    def test_SQ2(self):
        self.compare_accuracy('SQ6', 'SQ8')
    def test_SQ3(self):
        self.compare_accuracy('SQ8', 'SQfp16')
    def test_PQ(self):
        self.compare_accuracy('PQ6x8np', 'PQ8x8np')
    def test_PQ2(self):
        self.compare_accuracy('PQ8x6np', 'PQ8x8np')
    def test_IVFvsPQ(self):
        self.compare_accuracy('PQ8np', 'IVF256,PQ8np')
    def test_Lattice(self):
        # measured low/high: 20946.244, 5277.483
        self.compare_accuracy('ZnLattice3x10_4',
                              'ZnLattice3x20_4',
                              (22000, 5400))
    def test_Lattice2(self):
        # here the difference is actually tiny
        # measured errs: [16403.072, 15967.735]
        self.compare_accuracy('ZnLattice3x12_1',
                              'ZnLattice3x12_7',
                              (18000, 16000))
# shorthand used by the low-level tests below: wrap a numpy array as a
# SWIG C pointer
swig_ptr = faiss.swig_ptr
class LatticeTest(unittest.TestCase):
    """ Low-level lattice tests """
    def test_repeats(self):
        """Repeats codec round-trips vectors drawn from a tiny alphabet."""
        rs = np.random.RandomState(123)
        dim = 32
        for _i in range(1000):
            vec = np.floor((rs.rand(dim) ** 7) * 3).astype('float32')
            # the codec is built from the sorted values of the vector
            vecs = vec.copy()
            vecs.sort()
            repeats = faiss.Repeats(dim, swig_ptr(vecs))
            code = repeats.encode(swig_ptr(vec))
            vec2 = np.zeros(dim, dtype='float32')
            repeats.decode(code, swig_ptr(vec2))
            # print(vec2)
            assert np.all(vec == vec2)
    def test_ZnSphereCodec_encode_centroid(self):
        """Recursive codec maps every centroid of the flat codec to a
        distinct valid code."""
        dim = 8
        r2 = 5
        ref_codec = faiss.ZnSphereCodec(dim, r2)
        codec = faiss.ZnSphereCodecRec(dim, r2)
        # print(ref_codec.nv, codec.nv)
        assert ref_codec.nv == codec.nv
        s = set()
        for i in range(ref_codec.nv):
            c = np.zeros(dim, dtype='float32')
            ref_codec.decode(i, swig_ptr(c))
            code = codec.encode_centroid(swig_ptr(c))
            assert 0 <= code < codec.nv
            s.add(code)
        # injective over all centroids -> a bijection
        assert len(s) == codec.nv
    def test_ZnSphereCodecRec(self):
        """decode then encode_centroid is the identity on codes."""
        dim = 16
        r2 = 6
        codec = faiss.ZnSphereCodecRec(dim, r2)
        # print("nv=", codec.nv)
        for i in range(codec.nv):
            c = np.zeros(dim, dtype='float32')
            codec.decode(i, swig_ptr(c))
            code = codec.encode_centroid(swig_ptr(c))
            assert code == i
    def run_ZnSphereCodecAlt(self, dim, r2):
        """decode_multi then encode_multi must reproduce random codes."""
        # dim = 32
        # r2 = 14
        codec = faiss.ZnSphereCodecAlt(dim, r2)
        rs = np.random.RandomState(123)
        n = 100
        codes = rs.randint(codec.nv, size=n, dtype='uint64')
        x = np.empty((n, dim), dtype='float32')
        codec.decode_multi(n, swig_ptr(codes), swig_ptr(x))
        codes2 = np.empty(n, dtype='uint64')
        codec.encode_multi(n, swig_ptr(x), swig_ptr(codes2))
        assert np.all(codes == codes2)
    def test_ZnSphereCodecAlt32(self):
        self.run_ZnSphereCodecAlt(32, 14)
    def test_ZnSphereCodecAlt24(self):
        self.run_ZnSphereCodecAlt(24, 14)
class TestBitstring(unittest.TestCase):
    """ Low-level bit string tests """
    def test_rw(self):
        """BitstringWriter output must match a big-integer reference and
        read back exactly via BitstringReader."""
        rs = np.random.RandomState(1234)
        nbyte = 1000
        sz = 0
        bs = np.ones(nbyte, dtype='uint8')
        bw = faiss.BitstringWriter(swig_ptr(bs), nbyte)
        if False:
            # tiny hand-checked control sequence, kept for debugging
            ctrl = [(7, 0x35), (13, 0x1d74)]
            for nbit, x in ctrl:
                bw.write(x, nbit)
        else:
            # write random-width values until the buffer is full
            ctrl = []
            while True:
                nbit = int(1 + 62 * rs.rand() ** 4)
                if sz + nbit > nbyte * 8:
                    break
                x = int(rs.randint(1 << nbit, dtype='int64'))
                bw.write(x, nbit)
                ctrl.append((nbit, x))
                sz += nbit
        # rebuild the whole stream as one big integer and compare per byte
        bignum = 0
        sz = 0
        for nbit, x in ctrl:
            bignum |= x << sz
            sz += nbit
        for i in range(nbyte):
            self.assertTrue(((bignum >> (i * 8)) & 255) == bs[i])
        for i in range(nbyte):
            print(bin(bs[i] + 256)[3:], end=' ')
        print()
        # read everything back and compare with what was written
        br = faiss.BitstringReader(swig_ptr(bs), nbyte)
        for nbit, xref in ctrl:
            xnew = br.read(nbit)
            print('nbit %d xref %x xnew %x' % (nbit, xref, xnew))
            self.assertTrue(xnew == xref)
class TestIVFTransfer(unittest.TestCase):
    """Adding pre-computed SA codes must build the same index as adding the
    raw vectors."""

    def test_transfer(self):
        ds = SyntheticDataset(32, 2000, 200, 100)
        index = faiss.index_factory(ds.d, "IVF20,SQ8")
        index.train(ds.get_train())
        # reference: add the raw database vectors and search
        index.add(ds.get_database())
        dist_ref, ids_ref = index.search(ds.get_queries(), 10)
        # round trip: encode the database, then add the codes directly
        index.reset()
        sa_codes = index.sa_encode(ds.get_database())
        index.add_sa_codes(sa_codes)
        dist_new, ids_new = index.search(ds.get_queries(), 10)
        np.testing.assert_array_equal(ids_ref, ids_new)
        np.testing.assert_array_equal(dist_ref, dist_new)
class TestRefine(unittest.TestCase):
    """IndexRefine used as a standalone codec, and its equivalences."""
    def test_refine(self):
        """ Make sure that IndexRefine can function as a standalone codec """
        ds = SyntheticDataset(32, 500, 100, 0)
        index = faiss.index_factory(ds.d, "RQ2x5,Refine(ITQ,LSHt)")
        index.train(ds.get_train())
        index1 = index.base_index
        index2 = index.refine_index
        # the refine codec's codes are base codes followed by refine codes
        codes12 = index.sa_encode(ds.get_database())
        codes1 = index1.sa_encode(ds.get_database())
        codes2 = index2.sa_encode(ds.get_database())
        np.testing.assert_array_equal(
            codes12,
            np.hstack((codes1, codes2))
        )
    def test_equiv_rcq_rq(self):
        """ make sure that the codes generated by the standalone codec are the same
        between an
        IndexRefine with ResidualQuantizer
        and
        IVF with ResidualCoarseQuantizer
        both are the centroid id concatenated with the code.
        """
        ds = SyntheticDataset(16, 400, 100, 0)
        index1 = faiss.index_factory(ds.d, "RQ2x3,Refine(Flat)")
        index1.train(ds.get_train())
        irq = faiss.downcast_index(index1.base_index)
        # because the default beam factor for RCQ is 4
        irq.rq.max_beam_size = 4
        index2 = faiss.index_factory(ds.d, "IVF64(RCQ2x3),Flat")
        index2.train(ds.get_train())
        # share the trained residual quantizer between the two indexes
        quantizer = faiss.downcast_index(index2.quantizer)
        quantizer.rq = irq.rq
        index2.is_trained = True
        codes1 = index1.sa_encode(ds.get_database())
        codes2 = index2.sa_encode(ds.get_database())
        np.testing.assert_array_equal(codes1, codes2)
    def test_equiv_sh(self):
        """ make sure that the IVFSpectralHash sa_encode function gives the same
        result as the concatenated RQ + LSH index sa_encode """
        ds = SyntheticDataset(32, 500, 100, 0)
        index1 = faiss.index_factory(ds.d, "RQ1x4,Refine(ITQ16,LSH)")
        index1.train(ds.get_train())
        # reproduce this in an IndexIVFSpectralHash
        coarse_quantizer = faiss.IndexFlat(ds.d)
        rq = faiss.downcast_index(index1.base_index).rq
        centroids = get_additive_quantizer_codebooks(rq)[0]
        coarse_quantizer.add(centroids)
        encoder = faiss.downcast_index(index1.refine_index)
        # larger than the magnitude of the vectors
        # negative because otherwise the bits are flipped
        period = -100000.0
        index2 = faiss.IndexIVFSpectralHash(
            coarse_quantizer,
            ds.d,
            coarse_quantizer.ntotal,
            encoder.sa_code_size() * 8,
            period
        )
        # replace with the vt of the encoder. Binarization is performed by
        # the IndexIVFSpectralHash itself
        index2.replace_vt(encoder)
        codes1 = index1.sa_encode(ds.get_database())
        codes2 = index2.sa_encode(ds.get_database())
        np.testing.assert_array_equal(codes1, codes2)
| |
###############################################################################
# Tested so far:
#
# OpenClassesRoot
# OpenCurrentUser
# OpenLocalMachine
# OpenPerformanceData
# OpenUsers
# BaseRegCloseKey
# BaseRegCreateKey
# BaseRegDeleteKey
# BaseRegFlushKey
# BaseRegGetKeySecurity
# BaseRegOpenKey
# BaseRegQueryInfoKey
# BaseRegQueryValue
# BaseRegReplaceKey
# BaseRegRestoreKey
# BaseRegSaveKey
# BaseRegSetValue
# BaseRegEnumValue
# BaseRegEnumKey
# BaseRegGetVersion
# OpenCurrentConfig
# BaseRegQueryMultipleValues
# BaseRegSaveKeyEx
# OpenPerformanceText
# OpenPerformanceNlsText
# BaseRegQueryMultipleValues2
# BaseRegDeleteKeyEx
# BaseRegLoadKey
# BaseRegUnLoadKey
# BaseRegDeleteValue
#
# Not yet:
#
# BaseRegSetKeySecurity
#
#  These calls should not produce errors when run against a Windows 7 target
#
################################################################################
import unittest
import ConfigParser
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5 import epm, rrp
from impacket.dcerpc.v5.dtypes import NULL, MAXIMUM_ALLOWED, OWNER_SECURITY_INFORMATION
class RRPTests(unittest.TestCase):
def connect(self):
rpctransport = transport.DCERPCTransportFactory(self.stringBinding)
if len(self.hashes) > 0:
lmhash, nthash = self.hashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.username,self.password, self.domain, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
#dce.set_auth_level(RPC_C_AUTHN_LEVEL_PKT_INTEGRITY)
dce.connect()
dce.bind(rrp.MSRPC_UUID_RRP, transfer_syntax = self.ts)
resp = rrp.hOpenLocalMachine(dce, MAXIMUM_ALLOWED | rrp.KEY_WOW64_32KEY | rrp.KEY_ENUMERATE_SUB_KEYS)
return dce, rpctransport, resp['phKey']
def test_OpenClassesRoot(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenClassesRoot()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_OpenCurrentUser(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenCurrentUser()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_OpenLocalMachine(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenLocalMachine()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_OpenPerformanceData(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenPerformanceData()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_OpenUsers(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenUsers()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_BaseRegCloseKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegCloseKey()
request['hKey'] = phKey
resp = dce.request(request)
resp.dump()
def test_hBaseRegCreateKey_hBaseRegSetValue_hBaseRegDeleteKey(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hOpenClassesRoot(dce)
resp.dump()
regHandle = resp['phKey']
resp = rrp.hBaseRegCreateKey(dce, regHandle, 'BETO\x00')
resp.dump()
phKey = resp['phkResult']
try:
resp = rrp.hBaseRegSetValue(dce, phKey, 'BETO2\x00', rrp.REG_SZ, 'HOLA COMO TE VA\x00')
resp.dump()
except Exception, e:
print e
type, data = rrp.hBaseRegQueryValue(dce, phKey, 'BETO2\x00')
#print data
resp = rrp.hBaseRegDeleteValue(dce, phKey, 'BETO2\x00')
resp.dump()
resp = rrp.hBaseRegDeleteKey(dce, regHandle, 'BETO\x00')
resp.dump()
self.assertTrue( 'HOLA COMO TE VA\x00' == data )
def test_BaseRegCreateKey_BaseRegSetValue_BaseRegDeleteKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenClassesRoot()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
regHandle = resp['phKey']
request = rrp.BaseRegCreateKey()
request['hKey'] = regHandle
request['lpSubKey'] = 'BETO\x00'
request['lpClass'] = NULL
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
request['lpSecurityAttributes']['RpcSecurityDescriptor']['lpSecurityDescriptor'] = NULL
request['lpdwDisposition'] = rrp.REG_CREATED_NEW_KEY
resp = dce.request(request)
resp.dump()
phKey = resp['phkResult']
request = rrp.BaseRegSetValue()
request['hKey'] = phKey
request['lpValueName'] = 'BETO\x00'
request['dwType'] = rrp.REG_SZ
request['lpData'] = 'HOLA COMO TE VA\x00'.encode('utf-16le')
request['cbData'] = len('HOLA COMO TE VA\x00')*2
try:
resp = dce.request(request)
resp.dump()
except Exception, e:
print e
request = rrp.BaseRegQueryValue()
request['hKey'] = phKey
request['lpValueName'] = 'BETO\x00'
request['lpData'] = ' '*100
request['lpcbData'] = 100
request['lpcbLen'] = 100
resp = dce.request(request)
resp.dump()
resData = resp['lpData']
request = rrp.BaseRegDeleteKey()
request['hKey'] = regHandle
request['lpSubKey'] = 'BETO\x00'
resp = dce.request(request)
resp.dump()
self.assertTrue( 'HOLA COMO TE VA\x00' == ''.join(resData).decode('utf-16le'))
def test_BaseRegEnumKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED | rrp.KEY_ENUMERATE_SUB_KEYS
resp = dce.request(request)
request = rrp.BaseRegEnumKey()
request['hKey'] = resp['phkResult']
request['dwIndex'] = 1
# I gotta access the fields mannually :s
request.fields['lpNameIn'].fields['MaximumLength'] = 510
request.fields['lpNameIn'].fields['Data'].fields['Data'].fields['MaximumCount'] = 255
request['lpClassIn'] = ' '*100
request['lpftLastWriteTime'] = NULL
resp = dce.request(request)
resp.dump()
def test_hBaseRegEnumKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED | rrp.KEY_ENUMERATE_SUB_KEYS
resp = dce.request(request)
resp = rrp.hBaseRegEnumKey(dce, resp['phkResult'], 1 )
resp.dump()
def test_BaseRegEnumValue(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
request = rrp.BaseRegEnumValue()
request['hKey'] = resp['phkResult']
request['dwIndex'] = 6
request['lpValueNameIn'] = ' '*100
request['lpData'] = ' '*100
request['lpcbData'] = 100
request['lpcbLen'] = 100
resp = dce.request(request)
resp.dump()
def test_hBaseRegEnumValue(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp = rrp.hBaseRegEnumValue(dce, resp['phkResult'], 7, 10)
resp.dump()
def test_BaseRegFlushKey(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegFlushKey(dce,phKey)
resp.dump()
def test_BaseRegGetKeySecurity(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegGetKeySecurity(dce, phKey, OWNER_SECURITY_INFORMATION)
resp.dump()
def test_BaseRegOpenKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_hBaseRegQueryInfoKey(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegOpenKey(dce, phKey, 'SYSTEM\\CurrentControlSet\\Control\\Lsa\\JD\x00' )
resp = rrp.hBaseRegQueryInfoKey(dce,resp['phkResult'])
resp.dump()
def test_BaseRegQueryValue(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegQueryValue()
request['hKey'] = resp['phkResult']
request['lpValueName'] = 'ProductName\x00'
request['lpData'] = ' '*100
request['lpcbData'] = 100
request['lpcbLen'] = 100
resp = dce.request(request)
resp.dump()
def test_hBaseRegQueryValue(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegOpenKey(dce, phKey, 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00' )
resp.dump()
resp = rrp.hBaseRegQueryValue(dce, resp['phkResult'], 'ProductName\x00')
def test_BaseRegReplaceKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegReplaceKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\x00'
request['lpNewFile'] = 'SOFTWARE\x00'
request['lpOldFile'] = 'SOFTWARE\x00'
try:
resp = dce.request(request)
resp.dump()
except Exception, e:
if str(e).find('ERROR_FILE_NOT_FOUND') < 0:
raise
def test_hBaseRegReplaceKey(self):
dce, rpctransport, phKey = self.connect()
try:
resp = rrp.hBaseRegReplaceKey(dce, phKey, 'SOFTWARE\x00', 'SOFTWARE\x00', 'SOFTWARE\x00')
resp.dump()
except Exception, e:
if str(e).find('ERROR_FILE_NOT_FOUND') < 0:
raise
def test_BaseRegRestoreKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegRestoreKey()
request['hKey'] = phKey
request['lpFile'] = 'SOFTWARE\x00'
request['Flags'] = rrp.REG_REFRESH_HIVE
try:
resp = dce.request(request)
resp.dump()
except Exception, e:
if str(e).find('ERROR_FILE_NOT_FOUND') < 0:
raise
def test_hBaseRegRestoreKey(self):
dce, rpctransport, phKey = self.connect()
try:
resp = rrp.hBaseRegRestoreKey(dce, phKey, 'SOFTWARE\x00')
resp.dump()
except Exception, e:
if str(e).find('ERROR_FILE_NOT_FOUND') < 0:
raise
def test_BaseRegSaveKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenCurrentUser()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegSaveKey()
request['hKey'] = resp['phKey']
request['lpFile'] = 'BETUSFILE2\x00'
request['pSecurityAttributes'] = NULL
resp = dce.request(request)
resp.dump()
# I gotta remove the file now :s
smb = rpctransport.get_smb_connection()
smb.deleteFile('ADMIN$', 'System32\\BETUSFILE2')
def test_hBaseRegSaveKey(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hOpenCurrentUser(dce)
resp.dump()
resp = rrp.hBaseRegSaveKey(dce,resp['phKey'],'BETUSFILE2\x00')
resp.dump()
# I gotta remove the file now :s
smb = rpctransport.get_smb_connection()
smb.deleteFile('ADMIN$', 'System32\\BETUSFILE2')
def test_BaseRegGetVersion(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegGetVersion()
request['hKey'] = phKey
resp = dce.request(request)
resp.dump()
def test_hBaseRegGetVersion(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegGetVersion(dce, phKey)
resp.dump()
def test_OpenCurrentConfig(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenCurrentConfig()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_hOpenCurrentConfig(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hOpenCurrentConfig(dce)
resp.dump()
def test_BaseRegQueryMultipleValues(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED | rrp.KEY_QUERY_VALUE
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegQueryMultipleValues()
item1 = rrp.RVALENT()
item1['ve_valuename'] = 'ProductName\x00'
item1['ve_valuelen'] = len('ProductName\x00')
item1['ve_valueptr'] = NULL
item1['ve_type'] = rrp.REG_SZ
item2 = rrp.RVALENT()
item2['ve_valuename'] = 'SystemRoot\x00'
item2['ve_valuelen'] = len('SystemRoot\x00')
item1['ve_valueptr'] = NULL
item2['ve_type'] = rrp.REG_SZ
item3 = rrp.RVALENT()
item3['ve_valuename'] = 'EditionID\x00'
item3['ve_valuelen'] = len('EditionID\x00')
item3['ve_valueptr'] = NULL
item3['ve_type'] = rrp.REG_SZ
request['hKey'] = resp['phkResult']
request['val_listIn'].append(item1)
request['val_listIn'].append(item2)
request['val_listIn'].append(item3)
request['num_vals'] = len(request['val_listIn'])
request['lpvalueBuf'] = list(' '*128)
request['ldwTotsize'] = 128
resp = dce.request(request)
resp.dump()
def test_hBaseRegQueryMultipleValues(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegOpenKey(dce, phKey, 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00')
resp.dump()
valueIn = list()
item1 = {}
item1['ValueName'] = 'ProductName\x00'
item1['ValueType'] = rrp.REG_SZ
valueIn.append(item1)
item2 = {}
item2['ValueName'] = 'InstallDate\x00'
item2['ValueType'] = rrp.REG_DWORD
valueIn.append(item2)
item3 = {}
item3['ValueName'] = 'DigitalProductId\x00'
item3['ValueType'] = rrp.REG_BINARY
#valueIn.append(item3)
resp = rrp.hBaseRegQueryMultipleValues(dce, resp['phkResult'], valueIn)
#print resp
def test_BaseRegSaveKeyEx(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenCurrentUser()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegSaveKeyEx()
request['hKey'] = resp['phKey']
request['lpFile'] = 'BETUSFILE2\x00'
request['pSecurityAttributes'] = NULL
request['Flags'] = 4
resp = dce.request(request)
resp.dump()
# I gotta remove the file now :s
smb = rpctransport.get_smb_connection()
smb.deleteFile('ADMIN$', 'System32\\BETUSFILE2')
def test_hBaseRegSaveKeyEx(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hOpenCurrentUser(dce)
resp.dump()
resp = rrp.hBaseRegSaveKeyEx(dce, resp['phKey'], 'BETUSFILE2\x00')
resp.dump()
# I gotta remove the file now :s
smb = rpctransport.get_smb_connection()
smb.deleteFile('ADMIN$', 'System32\\BETUSFILE2')
def test_OpenPerformanceText(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenPerformanceText()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_hOpenPerformanceText(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hOpenPerformanceText(dce)
resp.dump()
def test_OpenPerformanceNlsText(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenPerformanceNlsText()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
def test_hOpenPerformanceNlsText(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hOpenPerformanceNlsText(dce)
resp.dump()
def test_BaseRegQueryMultipleValues2(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED | rrp.KEY_QUERY_VALUE
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegQueryMultipleValues2()
item1 = rrp.RVALENT()
item1['ve_valuename'] = 'ProductName\x00'
item1['ve_valuelen'] = len('ProductName\x00')
item1['ve_valueptr'] = NULL
item1['ve_type'] = rrp.REG_SZ
item2 = rrp.RVALENT()
item2['ve_valuename'] = 'SystemRoot\x00'
item2['ve_valuelen'] = len('SystemRoot\x00')
item1['ve_valueptr'] = NULL
item2['ve_type'] = rrp.REG_SZ
item3 = rrp.RVALENT()
item3['ve_valuename'] = 'EditionID\x00'
item3['ve_valuelen'] = len('EditionID\x00')
item3['ve_valueptr'] = NULL
item3['ve_type'] = rrp.REG_SZ
request['hKey'] = resp['phkResult']
request['val_listIn'].append(item1)
request['val_listIn'].append(item2)
request['val_listIn'].append(item3)
request['num_vals'] = len(request['val_listIn'])
request['lpvalueBuf'] = list(' '*128)
request['ldwTotsize'] = 128
resp = dce.request(request)
resp.dump()
def test_BaseRegDeleteKeyEx(self):
dce, rpctransport, phKey = self.connect()
request = rrp.OpenClassesRoot()
request['ServerName'] = NULL
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
regHandle = resp['phKey']
request = rrp.BaseRegCreateKey()
request['hKey'] = regHandle
request['lpSubKey'] = 'BETO\x00'
request['lpClass'] = NULL
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
request['lpSecurityAttributes']['RpcSecurityDescriptor']['lpSecurityDescriptor'] = NULL
request['lpdwDisposition'] = rrp.REG_CREATED_NEW_KEY
resp = dce.request(request)
resp.dump()
phKey = resp['phkResult']
request = rrp.BaseRegDeleteKeyEx()
request['hKey'] = regHandle
request['lpSubKey'] = 'BETO\x00'
request['AccessMask'] = rrp.KEY_WOW64_32KEY
request['Reserved'] = 0
resp = dce.request(request)
resp.dump()
def test_BaseRegLoadKey_BaseRegUnLoadKey(self):
dce, rpctransport, phKey = self.connect()
request = rrp.BaseRegOpenKey()
request['hKey'] = phKey
request['lpSubKey'] = 'SECURITY\x00'
request['dwOptions'] = 0x00000001
request['samDesired'] = MAXIMUM_ALLOWED
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegSaveKey()
request['hKey'] = resp['phkResult']
request['lpFile'] = 'SEC\x00'
request['pSecurityAttributes'] = NULL
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegLoadKey()
request['hKey'] = phKey
request['lpSubKey'] = 'BETUS\x00'
request['lpFile'] = 'SEC\x00'
resp = dce.request(request)
resp.dump()
request = rrp.BaseRegUnLoadKey()
request['hKey'] = phKey
request['lpSubKey'] = 'BETUS\x00'
resp = dce.request(request)
resp.dump()
smb = rpctransport.get_smb_connection()
smb.deleteFile('ADMIN$', 'System32\\SEC')
def test_hBaseRegLoadKey_hBaseRegUnLoadKey(self):
dce, rpctransport, phKey = self.connect()
resp = rrp.hBaseRegOpenKey(dce,phKey, 'SECURITY\x00')
resp.dump()
request = rrp.BaseRegSaveKey()
request['hKey'] = resp['phkResult']
request['lpFile'] = 'SEC\x00'
request['pSecurityAttributes'] = NULL
resp = dce.request(request)
resp.dump()
resp = rrp.hBaseRegLoadKey(dce, phKey,'BETUS\x00', 'SEC\x00' )
resp.dump()
resp = rrp.hBaseRegUnLoadKey(dce, phKey, 'BETUS\x00')
resp.dump()
smb = rpctransport.get_smb_connection()
smb.deleteFile('ADMIN$', 'System32\\SEC')
class SMBTransport(RRPTests):
    """Runs the RRP suite over the \\PIPE\\winreg named pipe."""

    def setUp(self):
        RRPTests.setUp(self)
        cfg = ConfigParser.ConfigParser()
        cfg.read('dcetests.cfg')
        section = 'SMBTransport'
        self.username = cfg.get(section, 'username')
        self.domain = cfg.get(section, 'domain')
        self.serverName = cfg.get(section, 'servername')
        self.password = cfg.get(section, 'password')
        self.machine = cfg.get(section, 'machine')
        self.hashes = cfg.get(section, 'hashes')
        self.stringBinding = r'ncacn_np:%s[\PIPE\winreg]' % self.machine
        # Transfer syntax UUID handed to bind() by RRPTests.connect().
        self.ts = ('8a885d04-1ceb-11c9-9fe8-08002b104860', '2.0')
class SMBTransport64(RRPTests):
    """Runs the RRP suite over \\PIPE\\winreg with the 64-bit transfer syntax."""

    def setUp(self):
        RRPTests.setUp(self)
        cfg = ConfigParser.ConfigParser()
        cfg.read('dcetests.cfg')
        section = 'SMBTransport'
        self.username = cfg.get(section, 'username')
        self.domain = cfg.get(section, 'domain')
        self.serverName = cfg.get(section, 'servername')
        self.password = cfg.get(section, 'password')
        self.machine = cfg.get(section, 'machine')
        self.hashes = cfg.get(section, 'hashes')
        self.stringBinding = r'ncacn_np:%s[\PIPE\winreg]' % self.machine
        # Presumably the NDR64 transfer syntax UUID -- differs from the one
        # used by SMBTransport above.
        self.ts = ('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')
class TCPTransport(RRPTests):
    """Runs the RRP suite over a dynamic ncacn_ip_tcp endpoint."""

    def setUp(self):
        RRPTests.setUp(self)
        cfg = ConfigParser.ConfigParser()
        cfg.read('dcetests.cfg')
        section = 'TCPTransport'
        self.username = cfg.get(section, 'username')
        self.domain = cfg.get(section, 'domain')
        self.serverName = cfg.get(section, 'servername')
        self.password = cfg.get(section, 'password')
        self.machine = cfg.get(section, 'machine')
        self.hashes = cfg.get(section, 'hashes')
        # Resolve the dynamic TCP endpoint for WINREG through the endpoint mapper.
        self.stringBinding = epm.hept_map(self.machine, rrp.MSRPC_UUID_RRP, protocol = 'ncacn_ip_tcp')
        # NOTE(review): self.ts is never set here although RRPTests.connect()
        # passes it to bind() -- confirm a default is provided elsewhere.
# Process command-line arguments.
if __name__ == '__main__':
    import sys
    loader = unittest.TestLoader()
    if len(sys.argv) > 1:
        # Run only the transport class named on the command line.
        suite = loader.loadTestsFromTestCase(globals()[sys.argv[1]])
    else:
        # Default: exercise both SMB transfer syntaxes.
        suite = loader.loadTestsFromTestCase(SMBTransport)
        suite.addTests(loader.loadTestsFromTestCase(SMBTransport64))
    unittest.TextTestRunner(verbosity=1).run(suite)
| |
"""
Geckoboard decorators.
"""
import base64
from types import ListType, TupleType
from xml.dom.minidom import Document
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from django.utils.datastructures import SortedDict
from django.utils.decorators import available_attrs
from django.utils import simplejson
# Annotation type codes for the Geckoboard Text widget (see
# TextWidgetDecorator): TEXT_INFO marks informational messages, TEXT_WARN
# marks warnings, TEXT_NONE leaves the message unannotated.  The numeric
# values are dictated by the Geckoboard API (info is 2, warn is 1).
TEXT_NONE = 0
TEXT_INFO = 2
TEXT_WARN = 1
class WidgetDecorator(object):
    """
    Geckoboard widget decorator.

    The decorated view must return a data structure suitable for
    serialization to XML or JSON for Geckoboard. See the Geckoboard
    API docs or the source of extending classes for details.

    If the ``GECKOBOARD_API_KEY`` setting is used, the request must
    contain the correct API key, or a 403 Forbidden response is
    returned.
    """

    def __new__(cls, *args, **kwargs):
        # Support both ``@widget`` (view passed positionally, decorate it
        # immediately) and ``@widget(option=...)`` (return the decorator
        # instance, to be called with the view later).
        obj = object.__new__(cls)
        obj.data = kwargs
        try:
            return obj(args[0])
        except IndexError:
            return obj

    def __call__(self, view_func):
        def _wrapped_view(request, *args, **kwargs):
            if not _is_api_key_correct(request):
                return HttpResponseForbidden("Geckoboard API key incorrect")
            view_result = view_func(request, *args, **kwargs)
            data = self._convert_view_result(view_result)
            # Merge the decorator options into a per-request copy.  The
            # previous implementation mutated ``self.data`` (which lives for
            # the process lifetime), leaking one request's payload into every
            # subsequent request served by this decorator instance.
            merged = dict(self.data)
            try:
                merged.update(data)
            except ValueError:
                # Non-mergeable view result: render it as-is.
                merged = data
            content, content_type = _render(request, merged)
            return HttpResponse(content, content_type=content_type)
        wrapper = wraps(view_func, assigned=available_attrs(view_func))
        return csrf_exempt(wrapper(_wrapped_view))

    def _convert_view_result(self, data):
        # Extending classes do view result mangling here.
        return data

widget = WidgetDecorator
class NumberWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Number widget decorator.

    The decorated view must return a tuple `(current, [previous])`, where
    `current` is the current value and `previous` is the previous value
    of the measured quantity.
    """

    def _convert_view_result(self, result):
        # Accept a bare value or a (current, previous) sequence and wrap
        # every entry that is not already a mapping in {'value': ...}.
        values = result if isinstance(result, (tuple, list)) else [result]
        return {'item': [entry if isinstance(entry, dict) else {'value': entry}
                         for entry in values]}

number_widget = NumberWidgetDecorator
class RAGWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Red-Amber-Green (RAG) widget decorator.

    The decorated view must return three `(value, [text])` tuples giving
    the numbers shown in red, amber and green (in that order).  The
    optional `text` element is displayed next to its value on the
    dashboard.
    """

    def _convert_view_result(self, result):
        items = []
        for entry in result:
            if not isinstance(entry, (tuple, list)):
                entry = [entry]
            item = SortedDict()
            # Geckoboard wants an empty string (not a null) for "no value".
            item['value'] = '' if entry[0] is None else entry[0]
            if len(entry) > 1:
                item['text'] = entry[1]
            items.append(item)
        return {'item': items}

rag_widget = RAGWidgetDecorator
class TextWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Text widget decorator.

    The decorated view must return a list of `(message, [type])` tuples.
    Each `message` string is shown in the widget; the optional `type`
    tells Geckoboard how to annotate it: ``TEXT_INFO`` for informational
    messages, ``TEXT_WARN`` for warnings, ``TEXT_NONE`` for plain text
    (the default).
    """

    def _convert_view_result(self, result):
        if not isinstance(result, (tuple, list)):
            result = [result]
        items = []
        for entry in result:
            if not isinstance(entry, (tuple, list)):
                entry = [entry]
            item = SortedDict()
            item['text'] = entry[0]
            # A missing or None type falls back to the plain-text marker.
            explicit = len(entry) > 1 and entry[1] is not None
            item['type'] = entry[1] if explicit else TEXT_NONE
            items.append(item)
        return {'item': items}

text_widget = TextWidgetDecorator
class PieChartWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Pie chart decorator.

    The decorated view must return a list of `(value, label, color)`
    tuples.  The color element is a string 'RRGGBB[TT]' representing
    red, green, blue and optionally transparency.
    """

    def _convert_view_result(self, result):
        # Geckoboard uses the British spelling 'colour' in its API.
        keys = ('value', 'label', 'colour')
        items = []
        for entry in result:
            if not isinstance(entry, (tuple, list)):
                entry = [entry]
            item = SortedDict()
            for key, val in zip(keys, entry):
                item[key] = val
            items.append(item)
        return {'item': items}

pie_chart = PieChartWidgetDecorator
class LineChartWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Line chart decorator.

    The decorated view must return a tuple `(values, x_axis, y_axis,
    [color])`.  `values` is a list of data points.  `x_axis` is a label
    string or a list of strings placed along the X-axis; `y_axis` works
    the same way for the Y-axis (multiple labels are spaced evenly).
    The optional `color` is a string ``'RRGGBB[TT]'`` representing red,
    green, blue and optionally transparency.
    """

    def _convert_view_result(self, result):
        def _axis_labels(value):
            # Normalize a single label (or None) to a one-element list.
            if value is None:
                value = ''
            return value if isinstance(value, (tuple, list)) else [value]

        data = SortedDict()
        data['item'] = list(result[0])
        data['settings'] = SortedDict()
        if len(result) > 1:
            data['settings']['axisx'] = _axis_labels(result[1])
        if len(result) > 2:
            data['settings']['axisy'] = _axis_labels(result[2])
        if len(result) > 3:
            data['settings']['colour'] = result[3]
        return data

line_chart = LineChartWidgetDecorator
class GeckOMeterWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Geck-O-Meter decorator.

    The decorated view must return a tuple `(value, min, max)`.  `value`
    is the current reading; `min` and `max` are the ends of the scale,
    given either as a bare value or as a `(value, text)` tuple whose
    `text` is shown next to that end of the meter.
    """

    def _convert_view_result(self, result):
        # Avoid shadowing the min/max builtins with the unpacked bounds.
        value, lower, upper = result
        data = SortedDict()
        data['item'] = value
        data['max'] = SortedDict()
        data['min'] = SortedDict()
        for key, bound in (('max', upper), ('min', lower)):
            if not isinstance(bound, (tuple, list)):
                bound = [bound]
            data[key]['value'] = bound[0]
            if len(bound) > 1:
                data[key]['text'] = bound[1]
        return data

geck_o_meter = GeckOMeterWidgetDecorator
class FunnelWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Funnel decorator.

    The decorated view must return a dictionary with at least an `items`
    entry: `{'items': [(100, '100 %'), (50, '50 %')]}`.

    Optional keys:
        type:       'standard' (default) or 'reverse' -- controls the
                    colour ordering.
        percentage: 'show' (default) or 'hide' -- whether the percentage
                    value is displayed.
        sort:       `False` (default) or `True` -- sort entries by value.
    """

    def _convert_view_result(self, result):
        entries = result.get('items', [])
        # Sort in place (descending) when requested.
        if result.get('sort'):
            entries.sort(reverse=True)
        data = SortedDict()
        data['item'] = [dict(zip(('value', 'label'), entry))
                        for entry in entries]
        data['type'] = result.get('type', 'standard')
        data['percentage'] = result.get('percentage', 'show')
        return data

funnel = FunnelWidgetDecorator
class BulletWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Bullet graph decorator.

    See
    http://support.geckoboard.com/entries/274940-custom-chart-widget-type-definitions
    for more information.

    The decorated method must return a dictionary containing these keys:

    Required keys:
    label:          Main label, eg. "Revenue 2011 YTD".
    axis_points:    Points on the axis, eg. [0, 200, 400, 600, 800, 1000].
    current:        Current value range, eg. 500 or [100, 500]. A singleton
                    500 is internally converted to [0, 500].
    comparative:    Comparative value, eg. 600.

    Optional keys:
    orientation:    One of 'horizontal' or 'vertical'. Defaults to horizontal.
    sublabel:       Appears below main label.
    red:            Red start and end, eg. [0,100]. Defaults are calculated
                    from axis_points.
    amber:          Amber start and end, eg. [0,100]. Defaults are calculated
                    from axis_points.
    green:          Green start and end, eg. [0,100]. Defaults are calculated
                    from axis_points.
    projected:      Projected value range, eg. 900 or [100, 900]. A singleton
                    900 is internally converted to [0, 900].
    auto_scale:     If true then values will be scaled down if they
                    do not fit into Geckoboard's UI, eg. a value of 1100
                    is represented as 1.1. If scaling takes place the sublabel
                    is suffixed with that information. Default is true.
    """

    def _convert_view_result(self, result):
        # Check required keys. We do not do type checking since this level
        # of competence is assumed.  (Replaced the Python-2-only
        # ``raise E, msg`` statement and dict.has_key with forms that are
        # also valid on Python 3; behavior is unchanged.)
        for key in ('label', 'axis_points', 'current', 'comparative'):
            if key not in result:
                raise RuntimeError("Key %s is required" % key)

        # Handle singleton current and projected: 500 -> [0, 500].
        current = result['current']
        projected = result.get('projected', None)
        if not isinstance(current, (list, tuple)):
            current = [0, current]
        if (projected is not None) and not isinstance(projected,
                                                      (list, tuple)):
            projected = [0, projected]

        # If red, amber and green are not *all* supplied, default to three
        # equal bands over the axis range.
        axis_points = result['axis_points']
        red = result.get('red', None)
        amber = result.get('amber', None)
        green = result.get('green', None)
        if (red is None) or (amber is None) or (green is None):
            if axis_points:
                max_point = max(axis_points)
                min_point = min(axis_points)
                third = (max_point - min_point) / 3
                red = (min_point, min_point + third - 1)
                amber = (min_point + third, max_point - third - 1)
                green = (max_point - third, max_point)
            else:
                red = amber = green = (0, 0)

        # Scan axis points for largest value and scale to avoid overflow in
        # Geckoboard's UI.
        auto_scale = result.get('auto_scale', True)
        if auto_scale and axis_points:
            scale_label_map = {1000000000: 'billions', 1000000: 'millions',
                               1000: 'thousands'}
            scale = 1
            value = max(axis_points)
            for n in (1000000000, 1000000, 1000):
                if value >= n:
                    scale = n
                    break

            # Little fixedpoint helper.
            # todo: use a fixedpoint library
            def scaler(value, scale):
                return float('%.2f' % (value*1.0 / scale))

            # Apply scale to all values
            if scale > 1:
                axis_points = [scaler(v, scale) for v in axis_points]
                current = (scaler(current[0], scale),
                           scaler(current[1], scale))
                if projected is not None:
                    projected = (scaler(projected[0], scale),
                                 scaler(projected[1], scale))
                red = (scaler(red[0], scale), scaler(red[1], scale))
                amber = (scaler(amber[0], scale), scaler(amber[1], scale))
                green = (scaler(green[0], scale), scaler(green[1], scale))
                result['comparative'] = scaler(result['comparative'], scale)

                # Suffix sublabel with the scale so absolute values can
                # still be read off the chart.
                sublabel = result.get('sublabel', '')
                if sublabel:
                    result['sublabel'] = '%s (%s)' % \
                        (sublabel, scale_label_map[scale])
                else:
                    result['sublabel'] = scale_label_map[scale].capitalize()

        # Assemble structure
        data = dict(
            orientation=result.get('orientation', 'horizontal'),
            item=dict(
                label=result['label'],
                axis=dict(point=axis_points),
                range=dict(
                    red=dict(start=red[0], end=red[1]),
                    amber=dict(start=amber[0], end=amber[1]),
                    green=dict(start=green[0], end=green[1])
                ),
                measure=dict(current=dict(start=current[0], end=current[1])),
                comparative=dict(point=result['comparative'])
            )
        )

        # Add optional items
        if 'sublabel' in result:
            data['item']['sublabel'] = result['sublabel']
        if projected is not None:
            data['item']['measure']['projected'] = dict(start=projected[0],
                                                        end=projected[1])
        return data

bullet = BulletWidgetDecorator
def _is_api_key_correct(request):
    """Return whether the Geckoboard API key on the request is correct."""
    api_key = getattr(settings, 'GECKOBOARD_API_KEY', None)
    if api_key is None:
        # No key configured: accept every request.
        return True
    auth = request.META.get('HTTP_AUTHORIZATION', '').split()
    if len(auth) != 2 or auth[0].lower() != 'basic':
        return False
    # Geckoboard sends the key as the username of a Basic auth header.
    request_key = base64.b64decode(auth[1]).split(':')[0]
    return request_key == api_key
def _render(request, data):
    """Render the data to Geckoboard based on the format request parameter."""
    # The format parameter may arrive via POST or GET; '2' selects JSON,
    # anything else falls back to XML.
    fmt = request.POST.get('format', '') or request.GET.get('format', '')
    if fmt == '2':
        return _render_json(data)
    return _render_xml(data)
def _render_json(data):
    # Serialize *data* to JSON; returns a (content, content_type) pair.
    return simplejson.dumps(data), 'application/json'
def _render_xml(data):
    """Serialize *data* to XML under a <root> element.

    Returns a (content, content_type) pair.
    """
    document = Document()
    root = document.createElement('root')
    document.appendChild(root)
    _build_xml(document, root, data)
    return document.toxml(), 'application/xml'
def _build_xml(doc, parent, data):
    """Dispatch XML building on the runtime type of *data*."""
    if isinstance(data, dict):
        _build_dict_xml(doc, parent, data)
    elif isinstance(data, (tuple, list)):
        _build_list_xml(doc, parent, data)
    else:
        _build_str_xml(doc, parent, data)
def _build_str_xml(doc, parent, data):
    """Append *data* to *parent* as a text node."""
    text_node = doc.createTextNode(unicode(data))
    parent.appendChild(text_node)
def _build_list_xml(doc, parent, data):
    """Append each element of *data* directly under *parent*, in order."""
    for element in data:
        _build_xml(doc, parent, element)
def _build_dict_xml(doc, parent, data):
    """Append one child element per key; sequence values repeat the tag."""
    for tag, value in data.items():
        # Treat a scalar value as a one-element sequence so both cases
        # share the same element-creation path.
        if isinstance(value, (list, tuple)):
            subitems = value
        else:
            subitems = [value]
        for subitem in subitems:
            child = doc.createElement(tag)
            _build_xml(doc, child, subitem)
            parent.appendChild(child)
class GeckoboardException(Exception):
    """Error raised for problems with the Geckoboard decorators."""
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.core import TracError
from trac.resource import ResourceNotFound
from trac.test import EnvironmentStub, Mock, MockPerm, locale_en
from trac.ticket.model import Ticket
from trac.ticket.web_ui import TicketModule
from trac.util.datefmt import utc
from trac.web.api import RequestDone, _RequestArgs
from trac.web.chrome import Chrome
class TicketModuleTestCase(unittest.TestCase):
    """Tests for TicketModule request processing and property-diff
    rendering, using an in-memory environment stub."""

    def setUp(self):
        # Fresh stub environment per test; the database is reset in tearDown.
        self.env = EnvironmentStub()
        self.ticket_module = TicketModule(self.env)

    def tearDown(self):
        self.env.reset_db()

    def _create_request(self, authname='anonymous', **kwargs):
        # Build a mock request with sensible defaults; keyword arguments
        # override the defaults, and 'args' entries are merged rather than
        # replacing the _RequestArgs instance.
        kw = {'path_info': '/', 'perm': MockPerm(), 'args': _RequestArgs(),
              'href': self.env.href, 'abs_href': self.env.abs_href,
              'tz': utc, 'locale': None, 'lc_time': locale_en,
              'session': {}, 'authname': authname,
              'chrome': {'notices': [], 'warnings': []},
              'method': None, 'get_header': lambda v: None, 'is_xhr': False,
              'form_token': None}
        if 'args' in kwargs:
            kw['args'].update(kwargs.pop('args'))
        kw.update(kwargs)

        def redirect(url, permanent=False):
            raise RequestDone

        return Mock(add_redirect_listener=lambda x: [].append(x),
                    redirect=redirect, **kw)

    def _create_ticket_with_change(self, old_props, new_props):
        """Create a ticket with `old_props` and apply properties
        in `new_props`.
        """
        t = Ticket(self.env)
        t.populate(old_props)
        t.insert()
        t.populate(new_props)
        t.save_changes('actor')
        return t

    def _insert_ticket(self, **kw):
        """Helper for inserting a ticket into the database"""
        ticket = Ticket(self.env)
        for k, v in kw.items():
            ticket[k] = v
        return ticket.insert()

    def test_ticket_module_as_default_handler(self):
        """The New Ticket mainnav entry is active when TicketModule is the
        `default_handler` and navigating to the base url. Test for regression
        of http://trac.edgewall.org/ticket/8791.
        """
        req = self._create_request()
        chrome = Chrome(self.env).prepare_request(req, self.ticket_module)

        name = None
        for item in chrome['nav']['mainnav']:
            if item['active'] is True:
                name = item['name']
                break
        self.assertEqual('newticket', name)

    def test_ticket_property_diff_owner_change(self):
        """Property diff message when ticket owner is changed."""
        t = self._create_ticket_with_change({'owner': 'owner1'},
                                            {'owner': 'owner2'})

        req = self._create_request(args={'id': t.id})
        data = self.ticket_module.process_request(req)[1]
        field = data['changes'][0]['fields']['owner']

        self.assertEqual("changed from <em>owner1</em> to <em>owner2</em>",
                         str(field['rendered']))

    def test_ticket_property_diff_owner_add(self):
        """Property diff message when ticket owner is added."""
        t = self._create_ticket_with_change({'owner': ''},
                                            {'owner': 'owner2'})

        req = self._create_request(args={'id': t.id})
        data = self.ticket_module.process_request(req)[1]
        field = data['changes'][0]['fields']['owner']

        self.assertEqual("set to <em>owner2</em>", str(field['rendered']))

    def test_ticket_property_diff_owner_remove(self):
        """Property diff message when ticket owner is removed."""
        t = self._create_ticket_with_change({'owner': 'owner1'},
                                            {'owner': ''})

        req = self._create_request(args={'id': t.id})
        data = self.ticket_module.process_request(req)[1]
        field = data['changes'][0]['fields']['owner']

        self.assertEqual("<em>owner1</em> deleted", str(field['rendered']))

    def test_ticket_property_diff_reporter_change(self):
        """Property diff message when ticket reporter is changed."""
        t = self._create_ticket_with_change({'reporter': 'reporter1'},
                                            {'reporter': 'reporter2'})

        req = self._create_request(args={'id': t.id})
        data = self.ticket_module.process_request(req)[1]
        field = data['changes'][0]['fields']['reporter']

        self.assertEqual("changed from <em>reporter1</em> to "
                         "<em>reporter2</em>", str(field['rendered']))

    def test_ticket_property_diff_reporter_add(self):
        """Property diff message when ticket reporter is added."""
        t = self._create_ticket_with_change({'reporter': ''},
                                            {'reporter': 'reporter2'})

        req = self._create_request(args={'id': t.id})
        data = self.ticket_module.process_request(req)[1]
        field = data['changes'][0]['fields']['reporter']

        self.assertEqual("set to <em>reporter2</em>", str(field['rendered']))

    def test_ticket_property_diff_reporter_remove(self):
        """Property diff message when ticket reporter is removed."""
        t = self._create_ticket_with_change({'reporter': 'reporter1'},
                                            {'reporter': ''})

        req = self._create_request(args={'id': t.id})
        data = self.ticket_module.process_request(req)[1]
        field = data['changes'][0]['fields']['reporter']

        self.assertEqual("<em>reporter1</em> deleted", str(field['rendered']))

    def _test_invalid_cnum_raises(self, action, cnum=None):
        # Shared driver: an invalid/missing 'cnum' argument must raise
        # TracError for the given comment action.
        self._insert_ticket()
        req = self._create_request(args={'action': action, 'id': '1'})
        if cnum is not None:
            req.args.update({'cnum': cnum})

        self.assertRaises(TracError, self.ticket_module.process_request, req)

    def test_comment_history_cnum_missing_raises(self):
        self._test_invalid_cnum_raises('comment-history')

    def test_comment_history_cnum_invalid_type_raises(self):
        self._test_invalid_cnum_raises('comment-history', 'a')

    def test_comment_history_cnum_empty_raises(self):
        self._test_invalid_cnum_raises('comment-history', '')

    def test_comment_history_cnum_out_of_range(self):
        """Out of range cnum returns an empty history."""
        self._insert_ticket()
        req = self._create_request(args={'action': 'comment-history',
                                         'id': '1', 'cnum': '1'})

        resp = self.ticket_module.process_request(req)
        self.assertEqual([], resp[1]['history'])

    def test_comment_diff_cnum_missing_raises(self):
        self._test_invalid_cnum_raises('comment-diff')

    def test_comment_diff_cnum_invalid_type_raises(self):
        self._test_invalid_cnum_raises('comment-diff', 'a')

    def test_comment_diff_cnum_empty_raises(self):
        self._test_invalid_cnum_raises('comment-diff', '')

    def test_comment_diff_cnum_out_of_range_raises(self):
        self._insert_ticket()
        req = self._create_request(args={'action': 'comment-diff',
                                         'id': '1', 'cnum': '1'})

        self.assertRaises(ResourceNotFound,
                          self.ticket_module.process_request, req)
def suite():
    """Return the test suite for this module."""
    suite = unittest.TestSuite()
    # unittest.makeSuite is deprecated since Python 3.11 and removed in
    # 3.13; loadTestsFromTestCase is the supported equivalent and returns
    # the same kind of suite.
    suite.addTest(
        unittest.defaultTestLoader.loadTestsFromTestCase(
            TicketModuleTestCase))
    return suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from systestlib import DutSystemTest
from testlib import random_string
# Canonical running-config lines used to seed and verify DUT state below.
VIRT_NULL = 'no ip virtual-router mac-address'
VIRT_ENTRY_A = 'ip virtual-router mac-address 00:11:22:33:44:55'
VIRT_ENTRY_B = 'ip virtual-router mac-address 00:11:22:33:44:56'
VIRT_ENTRY_C = 'ip virtual-router mac-address 00:11:22:33:44:57'

# Prefix of the per-interface virtual-router address commands.
IP_CMD = 'ip virtual-router address'
class TestApiVarp(DutSystemTest):
    """System tests for the global VARP virtual MAC address API.

    Each test first resets every DUT to a known configuration, then
    exercises the 'varp' API against it.
    """

    def test_basic_get(self):
        for dut in self.duts:
            dut.config([VIRT_NULL])
            response = dut.api('varp').get()
            self.assertIsNotNone(response)

    def test_get_with_value(self):
        for dut in self.duts:
            dut.config([VIRT_NULL, VIRT_ENTRY_A])
            response = dut.api('varp').get()
            self.assertIsNotNone(response)
            self.assertEqual(response['mac_address'], '00:11:22:33:44:55')

    def test_get_none(self):
        for dut in self.duts:
            dut.config([VIRT_NULL])
            response = dut.api('varp').get()
            self.assertIsNotNone(response)
            self.assertEqual(response['mac_address'], None)

    def test_set_mac_address_with_value(self):
        for dut in self.duts:
            dut.config([VIRT_NULL])
            api = dut.api('varp')
            self.assertNotIn(VIRT_ENTRY_A, api.config)
            result = dut.api('varp').set_mac_address('00:11:22:33:44:55')
            self.assertTrue(result)
            self.assertIn(VIRT_ENTRY_A, api.config)

    def test_change_mac_address(self):
        for dut in self.duts:
            dut.config([VIRT_NULL, VIRT_ENTRY_A])
            api = dut.api('varp')
            self.assertIn(VIRT_ENTRY_A, api.config)
            result = dut.api('varp').set_mac_address('00:11:22:33:44:56')
            self.assertTrue(result)
            self.assertIn(VIRT_ENTRY_B, api.config)

    def test_remove_mac_address(self):
        for dut in self.duts:
            dut.config([VIRT_NULL, VIRT_ENTRY_A])
            api = dut.api('varp')
            self.assertIn(VIRT_ENTRY_A, api.config)
            result = dut.api('varp').set_mac_address(disable=True)
            self.assertTrue(result)
            self.assertNotIn(VIRT_ENTRY_A, api.config)

    def test_set_mac_address_with_bad_value(self):
        for dut in self.duts:
            dut.config([VIRT_NULL])
            api = dut.api('varp')
            self.assertNotIn(VIRT_ENTRY_A, api.config)
            with self.assertRaises(ValueError):
                # Dotted MAC notation is expected to be rejected.
                dut.api('varp').set_mac_address('0011.2233.4455')
class TestApiVarpInterfaces(DutSystemTest):
    """System tests for per-interface VARP virtual-router addresses.

    Each test rebuilds interface Vlan1000 from scratch on every DUT,
    then verifies how set_addresses() adds, replaces, or removes the
    'ip virtual-router address' lines in that interface's config block.
    """

    def test_set_virtual_addr_with_values_clean(self):
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24'])
            api = dut.api('varp')
            self.assertNotIn('ip virtual-router address 1.1.1.2',
                             api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000',
                                                              ['1.1.1.2',
                                                               '1.1.1.3'])
            self.assertTrue(result)
            self.assertIn('ip virtual-router address 1.1.1.2',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.3',
                          api.get_block('interface Vlan1000'))

    def test_set_virtual_addr_with_values_dirty(self):
        # Pre-existing addresses must be replaced, not merged.
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24',
                        'ip virtual-router address 1.1.1.20'])
            api = dut.api('varp')
            self.assertIn('ip virtual-router address 1.1.1.20',
                          api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000',
                                                              ['1.1.1.2',
                                                               '1.1.1.3'])
            self.assertTrue(result)
            self.assertIn('ip virtual-router address 1.1.1.2',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.3',
                          api.get_block('interface Vlan1000'))
            self.assertNotIn('ip virtual-router address 1.1.1.20',
                             api.get_block('interface Vlan1000'))

    def test_default_virtual_addrs(self):
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24',
                        'ip virtual-router address 1.1.1.20',
                        'ip virtual-router address 1.1.1.21'])
            api = dut.api('varp')
            self.assertIn('ip virtual-router address 1.1.1.20',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.21',
                          api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000',
                                                              default=True)
            self.assertTrue(result)
            self.assertNotIn('ip virtual-router address 1.1.1.20',
                             api.get_block('interface Vlan1000'))
            self.assertNotIn('ip virtual-router address 1.1.1.21',
                             api.get_block('interface Vlan1000'))

    def test_negate_virtual_addrs(self):
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24',
                        'ip virtual-router address 1.1.1.20',
                        'ip virtual-router address 1.1.1.21'])
            api = dut.api('varp')
            self.assertIn('ip virtual-router address 1.1.1.20',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.21',
                          api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000',
                                                              addresses=None)
            self.assertTrue(result)
            self.assertNotIn('ip virtual-router address 1.1.1.20',
                             api.get_block('interface Vlan1000'))
            self.assertNotIn('ip virtual-router address 1.1.1.21',
                             api.get_block('interface Vlan1000'))

    def test_negate_virtual_addrs_with_disable(self):
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24',
                        'ip virtual-router address 1.1.1.20',
                        'ip virtual-router address 1.1.1.21'])
            api = dut.api('varp')
            self.assertIn('ip virtual-router address 1.1.1.20',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.21',
                          api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000',
                                                              disable=True)
            self.assertTrue(result)
            self.assertNotIn('ip virtual-router address 1.1.1.20',
                             api.get_block('interface Vlan1000'))
            self.assertNotIn('ip virtual-router address 1.1.1.21',
                             api.get_block('interface Vlan1000'))

    def test_empty_list_virtual_addrs(self):
        # An explicit empty list clears all configured addresses.
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24',
                        'ip virtual-router address 1.1.1.20',
                        'ip virtual-router address 1.1.1.21'])
            api = dut.api('varp')
            self.assertIn('ip virtual-router address 1.1.1.20',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.21',
                          api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000',
                                                              addresses=[])
            self.assertTrue(result)
            self.assertNotIn('ip virtual-router address 1.1.1.20',
                             api.get_block('interface Vlan1000'))
            self.assertNotIn('ip virtual-router address 1.1.1.21',
                             api.get_block('interface Vlan1000'))

    def test_no_attr_virtual_addrs(self):
        # Calling with no address argument at all also clears them.
        for dut in self.duts:
            dut.config(['no interface Vlan1000', 'interface Vlan1000',
                        'ip address 1.1.1.1/24',
                        'ip virtual-router address 1.1.1.20',
                        'ip virtual-router address 1.1.1.21'])
            api = dut.api('varp')
            self.assertIn('ip virtual-router address 1.1.1.20',
                          api.get_block('interface Vlan1000'))
            self.assertIn('ip virtual-router address 1.1.1.21',
                          api.get_block('interface Vlan1000'))
            result = dut.api('varp').interfaces.set_addresses('Vlan1000')
            self.assertTrue(result)
            self.assertNotIn('ip virtual-router address 1.1.1.20',
                             api.get_block('interface Vlan1000'))
            self.assertNotIn('ip virtual-router address 1.1.1.21',
                             api.get_block('interface Vlan1000'))
# Run the system tests directly when executed as a script.
if __name__ == '__main__':
    unittest.main()
| |
"""
Component to interface with cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import asyncio
import base64
import collections
from contextlib import suppress
from datetime import timedelta
import logging
import hashlib
from random import SystemRandom
import attr
from aiohttp import web
import async_timeout
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, \
SERVICE_TURN_ON
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.components import websocket_api
import homeassistant.helpers.config_validation as cv
DOMAIN = 'camera'
DEPENDENCIES = ['http']

_LOGGER = logging.getLogger(__name__)

SERVICE_ENABLE_MOTION = 'enable_motion_detection'
SERVICE_DISABLE_MOTION = 'disable_motion_detection'
SERVICE_SNAPSHOT = 'snapshot'

SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + '.{}'

ATTR_FILENAME = 'filename'

STATE_RECORDING = 'recording'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'

# Bitfield of features supported by the camera entity
SUPPORT_ON_OFF = 1

DEFAULT_CONTENT_TYPE = 'image/jpeg'
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?token={1}'

# How often async_setup's update_tokens rotates the entity access tokens.
TOKEN_CHANGE_INTERVAL = timedelta(minutes=5)
_RND = SystemRandom()

FALLBACK_STREAM_INTERVAL = 1  # seconds
MIN_STREAM_INTERVAL = 0.5  # seconds

CAMERA_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})

# Snapshot additionally requires a filename template.
CAMERA_SERVICE_SNAPSHOT = CAMERA_SERVICE_SCHEMA.extend({
    vol.Required(ATTR_FILENAME): cv.template
})

WS_TYPE_CAMERA_THUMBNAIL = 'camera_thumbnail'
SCHEMA_WS_CAMERA_THUMBNAIL = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
    vol.Required('type'): WS_TYPE_CAMERA_THUMBNAIL,
    vol.Required('entity_id'): cv.entity_id
})
@attr.s
class Image:
    """Represent an image."""

    # MIME type of the payload, e.g. 'image/jpeg'.
    content_type = attr.ib(type=str)
    # Raw image bytes.
    content = attr.ib(type=bytes)
@bind_hass
async def async_get_image(hass, entity_id, timeout=10):
    """Fetch an image from a camera entity.

    Raises HomeAssistantError when no image could be fetched within
    *timeout* seconds.
    """
    camera = _get_camera_from_entity_id(hass, entity_id)

    # Timeouts and cancellation fall through to the error below.
    with suppress(asyncio.CancelledError, asyncio.TimeoutError):
        with async_timeout.timeout(timeout, loop=hass.loop):
            img_bytes = await camera.async_camera_image()

            if img_bytes:
                return Image(camera.content_type, img_bytes)

    raise HomeAssistantError('Unable to get image')
@bind_hass
async def async_get_mjpeg_stream(hass, request, entity_id):
    """Fetch an mjpeg stream from a camera entity."""
    camera = _get_camera_from_entity_id(hass, entity_id)
    stream = camera.handle_async_mjpeg_stream(request)
    return await stream
async def async_get_still_stream(request, image_cb, content_type, interval):
    """Generate an HTTP MJPEG stream from camera images.

    This method must be run in the event loop.
    """
    response = web.StreamResponse()
    response.content_type = ('multipart/x-mixed-replace; '
                             'boundary=--frameboundary')
    await response.prepare(request)

    async def _write_frame(frame):
        """Write one multipart frame to the stream."""
        header = ('--frameboundary\r\n'
                  'Content-Type: {}\r\n'
                  'Content-Length: {}\r\n\r\n').format(content_type,
                                                       len(frame))
        await response.write(header.encode('utf-8') + frame + b'\r\n')

    previous_frame = None
    while True:
        frame = await image_cb()
        if not frame:
            break

        if frame != previous_frame:
            await _write_frame(frame)
            # Chrome seems to always ignore first picture, print it twice.
            if previous_frame is None:
                await _write_frame(frame)
            previous_frame = frame

        await asyncio.sleep(interval)

    return response
def _get_camera_from_entity_id(hass, entity_id):
    """Look up a camera entity, validating it is set up, known, and on."""
    camera_component = hass.data.get(DOMAIN)
    if camera_component is None:
        raise HomeAssistantError('Camera component not set up')

    entity = camera_component.get_entity(entity_id)
    if entity is None:
        raise HomeAssistantError('Camera not found')
    if not entity.is_on:
        raise HomeAssistantError('Camera is off')

    return entity
async def async_setup(hass, config):
    """Set up the camera component."""
    component = hass.data[DOMAIN] = \
        EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)

    # HTTP views plus the websocket thumbnail command.
    hass.http.register_view(CameraImageView(component))
    hass.http.register_view(CameraMjpegStream(component))
    hass.components.websocket_api.async_register_command(
        WS_TYPE_CAMERA_THUMBNAIL, websocket_camera_thumbnail,
        SCHEMA_WS_CAMERA_THUMBNAIL
    )

    await component.async_setup(config)

    @callback
    def update_tokens(time):
        """Update tokens of the entities."""
        for entity in component.entities:
            entity.async_update_token()
            hass.async_create_task(entity.async_update_ha_state())

    hass.helpers.event.async_track_time_interval(
        update_tokens, TOKEN_CHANGE_INTERVAL)

    # Entity services; registration order matches the original sequence.
    service_registrations = (
        (SERVICE_ENABLE_MOTION, CAMERA_SERVICE_SCHEMA,
         'async_enable_motion_detection'),
        (SERVICE_DISABLE_MOTION, CAMERA_SERVICE_SCHEMA,
         'async_disable_motion_detection'),
        (SERVICE_TURN_OFF, CAMERA_SERVICE_SCHEMA, 'async_turn_off'),
        (SERVICE_TURN_ON, CAMERA_SERVICE_SCHEMA, 'async_turn_on'),
        (SERVICE_SNAPSHOT, CAMERA_SERVICE_SNAPSHOT,
         async_handle_snapshot_service),
    )
    for service_name, schema, handler in service_registrations:
        component.async_register_entity_service(service_name, schema, handler)

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    camera_component = hass.data[DOMAIN]
    return await camera_component.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    camera_component = hass.data[DOMAIN]
    return await camera_component.async_unload_entry(entry)
class Camera(Entity):
    """The base class for camera entities."""

    def __init__(self):
        """Initialize a camera."""
        self.is_streaming = False
        # MIME type served by this camera; platforms may override.
        self.content_type = DEFAULT_CONTENT_TYPE
        # Keep the current and previous access token so URLs issued just
        # before a token rotation stay valid for one more interval.
        self.access_tokens = collections.deque([], 2)
        self.async_update_token()

    @property
    def should_poll(self):
        """No need to poll cameras."""
        return False

    @property
    def entity_picture(self):
        """Return a link to the camera feed as entity picture."""
        # Uses the newest access token so the URL stays authorized.
        return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1])

    @property
    def supported_features(self):
        """Flag supported features."""
        return 0

    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return False

    @property
    def brand(self):
        """Return the camera brand."""
        return None

    @property
    def motion_detection_enabled(self):
        """Return the camera motion detection status."""
        return None

    @property
    def model(self):
        """Return the camera model."""
        return None

    @property
    def frame_interval(self):
        """Return the interval between frames of the mjpeg stream."""
        return 0.5

    def camera_image(self):
        """Return bytes of camera image."""
        raise NotImplementedError()

    @callback
    def async_camera_image(self):
        """Return bytes of camera image.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.camera_image)

    async def handle_async_still_stream(self, request, interval):
        """Generate an HTTP MJPEG stream from camera images.

        This method must be run in the event loop.
        """
        return await async_get_still_stream(request, self.async_camera_image,
                                            self.content_type, interval)

    async def handle_async_mjpeg_stream(self, request):
        """Serve an HTTP MJPEG stream from the camera.

        This method can be overridden by camera platforms to proxy
        a direct stream from the camera.

        This method must be run in the event loop.
        """
        return await self.handle_async_still_stream(
            request, self.frame_interval)

    @property
    def state(self):
        """Return the camera state."""
        # Recording takes precedence over streaming.
        if self.is_recording:
            return STATE_RECORDING
        if self.is_streaming:
            return STATE_STREAMING
        return STATE_IDLE

    @property
    def is_on(self):
        """Return true if on."""
        return True

    def turn_off(self):
        """Turn off camera."""
        raise NotImplementedError()

    @callback
    def async_turn_off(self):
        """Turn off camera."""
        return self.hass.async_add_job(self.turn_off)

    def turn_on(self):
        """Turn on camera."""
        raise NotImplementedError()

    @callback
    def async_turn_on(self):
        """Turn on camera."""
        return self.hass.async_add_job(self.turn_on)

    def enable_motion_detection(self):
        """Enable motion detection in the camera."""
        raise NotImplementedError()

    @callback
    def async_enable_motion_detection(self):
        """Call the job and enable motion detection."""
        return self.hass.async_add_job(self.enable_motion_detection)

    def disable_motion_detection(self):
        """Disable motion detection in camera."""
        raise NotImplementedError()

    @callback
    def async_disable_motion_detection(self):
        """Call the job and disable motion detection."""
        return self.hass.async_add_job(self.disable_motion_detection)

    @property
    def state_attributes(self):
        """Return the camera state attributes."""
        attrs = {
            'access_token': self.access_tokens[-1],
        }

        if self.model:
            attrs['model_name'] = self.model

        if self.brand:
            attrs['brand'] = self.brand

        if self.motion_detection_enabled:
            attrs['motion_detection'] = self.motion_detection_enabled

        return attrs

    @callback
    def async_update_token(self):
        """Update the used token."""
        # 256 random bits hashed into a hex token; the deque keeps only
        # the two most recent tokens.
        self.access_tokens.append(
            hashlib.sha256(
                _RND.getrandbits(256).to_bytes(32, 'little')).hexdigest())
class CameraView(HomeAssistantView):
    """Base CameraView."""

    requires_auth = False

    def __init__(self, component):
        """Initialize a basic camera view."""
        self.component = component

    async def get(self, request, entity_id):
        """Start a GET request."""
        camera = self.component.get_entity(entity_id)
        if camera is None:
            raise web.HTTPNotFound()

        # A valid camera access token substitutes for normal auth; only
        # consult the token when the request itself is unauthenticated.
        if not request[KEY_AUTHENTICATED]:
            if request.query.get('token') not in camera.access_tokens:
                raise web.HTTPUnauthorized()

        if not camera.is_on:
            _LOGGER.debug('Camera is off.')
            raise web.HTTPServiceUnavailable()

        return await self.handle(request, camera)

    async def handle(self, request, camera):
        """Handle the camera request."""
        raise NotImplementedError()
class CameraImageView(CameraView):
    """Camera view to serve an image."""

    url = '/api/camera_proxy/{entity_id}'
    name = 'api:camera:image'

    async def handle(self, request, camera):
        """Serve camera image."""
        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
            with async_timeout.timeout(10, loop=request.app['hass'].loop):
                img = await camera.async_camera_image()

            if img:
                return web.Response(body=img,
                                    content_type=camera.content_type)

        # Timeout, cancellation, or an empty image all end up here.
        raise web.HTTPInternalServerError()
class CameraMjpegStream(CameraView):
    """Camera View to serve an MJPEG stream."""

    url = '/api/camera_proxy_stream/{entity_id}'
    name = 'api:camera:stream'

    async def handle(self, request, camera):
        """Serve camera stream, possibly with interval.

        Without an ``interval`` query parameter the camera's native MJPEG
        stream is proxied; otherwise a stream is composed from stills at
        the requested interval.
        """
        interval = request.query.get('interval')
        if interval is None:
            return await camera.handle_async_mjpeg_stream(request)

        try:
            # Compose camera stream from stills. Reuse the value read
            # above instead of fetching the query parameter twice.
            interval = float(interval)
            if interval < MIN_STREAM_INTERVAL:
                # Fixed the duplicated word ("must be be") in the message.
                raise ValueError("Stream interval must be > {}"
                                 .format(MIN_STREAM_INTERVAL))
            return await camera.handle_async_still_stream(request, interval)
        except ValueError:
            # Non-numeric or too-small interval -> 400 Bad Request.
            raise web.HTTPBadRequest()
@websocket_api.async_response
async def websocket_camera_thumbnail(hass, connection, msg):
    """Handle get camera thumbnail websocket command.

    Async friendly.
    """
    try:
        image = await async_get_image(hass, msg['entity_id'])
        payload = {
            'content_type': image.content_type,
            'content': base64.b64encode(image.content).decode('utf-8')
        }
        connection.send_message(
            websocket_api.result_message(msg['id'], payload))
    except HomeAssistantError:
        connection.send_message(websocket_api.error_message(
            msg['id'], 'image_fetch_failed', 'Unable to fetch image'))
async def async_handle_snapshot_service(camera, service):
    """Handle snapshot services calls."""
    hass = camera.hass

    def _write_image(to_file, image_data):
        """Executor helper to write image."""
        with open(to_file, 'wb') as img_file:
            img_file.write(image_data)

    filename_template = service.data[ATTR_FILENAME]
    filename_template.hass = hass
    snapshot_file = filename_template.async_render(
        variables={ATTR_ENTITY_ID: camera})

    # check if we allow to access to that file
    if not hass.config.is_allowed_path(snapshot_file):
        _LOGGER.error(
            "Can't write %s, no access to path!", snapshot_file)
        return

    image = await camera.async_camera_image()
    try:
        # File I/O happens in the executor to keep the event loop free.
        await hass.async_add_executor_job(
            _write_image, snapshot_file, image)
    except OSError as err:
        _LOGGER.error("Can't write image to file: %s", err)
| |
#!/usr/bin/env python
#
# Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculates Javascript dependencies without requiring Google3.

It iterates over a number of search paths and builds a dependency tree. With
the inputs provided, it walks the dependency tree and outputs all the files
required for compilation.
"""
try:
import distutils.version
except ImportError:
# distutils is not available in all environments
distutils = None
import logging
import optparse
import os
import re
import subprocess
import sys
# Patterns for goog.require()/goog.provide() statements, 'ns:' namespace
# references, and dotted version numbers. Raw strings avoid the invalid
# escape-sequence warnings Python 3.6+ emits for '\.', '\s', and '\w'.
req_regex = re.compile(r'goog\.require\s*\(\s*[\'\"]([^\)]+)[\'\"]\s*\)')
prov_regex = re.compile(r'goog\.provide\s*\(\s*[\'\"]([^\)]+)[\'\"]\s*\)')
ns_regex = re.compile(r'^ns:((\w+\.)*(\w+))$')
version_regex = re.compile(r'[\.0-9]+')
def IsValidFile(ref):
    """Return True when *ref* names an existing regular file."""
    return os.path.isfile(ref)
def IsJsFile(ref):
    """Return True when *ref* ends with the JavaScript file extension."""
    js_extension = '.js'
    return ref.endswith(js_extension)
def IsNamespace(ref):
    """Return True when *ref* is an 'ns:' namespace reference."""
    return ns_regex.match(ref) is not None
def IsDirectory(ref):
    """Return True when *ref* names an existing directory."""
    return os.path.isdir(ref)
def ExpandDirectories(refs):
  """Expands any directory references into inputs.

  Description:
    Looks for any directories in the provided references. Found directories
    are recursively searched for .js files, which are then added to the result
    list.

  Args:
    refs: a list of references such as files, directories, and namespaces

  Returns:
    A list of references with directories removed and replaced by any
    .js files that are found in them. Also, the paths will be normalized.
  """
  result = []
  for ref in refs:
    if IsDirectory(ref):
      # Disable 'Unused variable' for subdirs
      # pylint: disable-msg=W0612
      for (directory, subdirs, filenames) in os.walk(ref):
        for filename in filenames:
          if IsJsFile(filename):
            result.append(os.path.join(directory, filename))
    else:
      result.append(ref)
  # A list comprehension instead of bare map(): on Python 3 map() returns
  # a lazy iterator, but callers concatenate this return value with other
  # lists, so it must be a real list on both Python 2 and 3.
  return [os.path.normpath(ref) for ref in result]
class DependencyInfo(object):
  """Represents a dependency that is used to build and walk a tree."""

  def __init__(self, filename):
    # The source file plus the namespaces it provides and requires.
    self.filename = filename
    self.provides = []
    self.requires = []

  def __str__(self):
    parts = (self.filename, repr(self.provides), repr(self.requires))
    return '%s Provides: %s Requires: %s' % parts
def BuildDependenciesFromFiles(files):
  """Build a list of dependencies from a list of files.

  Description:
    Takes a list of files, extracts their provides and requires, and builds
    out a list of dependency objects.

  Args:
    files: a list of files to be parsed for goog.provides and goog.requires.

  Returns:
    A list of dependency objects, one for each file in the files argument.
  """
  result = []
  filenames = set()
  for filename in files:
    # Each file contributes at most one DependencyInfo, even when listed
    # several times.
    if filename in filenames:
      continue

    # Python 3 requires the file encoding to be specified
    if (sys.version_info[0] < 3):
      file_handle = open(filename, 'r')
    else:
      file_handle = open(filename, 'r', encoding='utf8')

    dep = DependencyInfo(filename)
    # 'with' guarantees the handle is closed even if parsing raises,
    # replacing the previous manual try/finally.
    with file_handle:
      for line in file_handle:
        # One anchored match per pattern per line; the original matched
        # and then searched again, running each regex twice.
        req_match = req_regex.match(line)
        if req_match:
          dep.requires.append(req_match.group(1))
        prov_match = prov_regex.match(line)
        if prov_match:
          dep.provides.append(prov_match.group(1))
    result.append(dep)
    filenames.add(filename)
  return result
def BuildDependencyHashFromDependencies(deps):
  """Builds a hash for searching dependencies by the namespaces they provide.

  Description:
    Dependency objects can provide multiple namespaces. This method enumerates
    the provides of each dependency and adds them to a hash that can be used
    to easily resolve a given dependency by a namespace it provides.

  Args:
    deps: a list of dependency objects used to build the hash.

  Raises:
    Exception: If a multiple files try to provide the same namepace.

  Returns:
    A hash table { namespace: dependency } that can be used to resolve a
    dependency by a namespace it provides.
  """
  dep_hash = {}
  for dep in deps:
    for provide in dep.provides:
      previous = dep_hash.get(provide)
      if previous is not None:
        raise Exception('Duplicate provide (%s) in (%s, %s)' % (
            provide,
            previous.filename,
            dep.filename))
      dep_hash[provide] = dep
  return dep_hash
def CalculateDependencies(paths, inputs):
  """Calculates the dependencies for given inputs.

  Description:
    This method takes a list of paths (files, directories) and builds a
    searchable data structure based on the namespaces that each .js file
    provides. It then parses through each input, resolving dependencies
    against this data structure. The final output is a list of files,
    including the inputs, that represent all of the code that is needed to
    compile the given inputs.

  Args:
    paths: the references (files, directories) that are used to build the
      dependency hash.
    inputs: the inputs (files, directories, namespaces) that have dependencies
      that need to be calculated.

  Raises:
    Exception: if a provided input is invalid.

  Returns:
    A list of all files, including inputs, that are needed to compile the given
    inputs.
  """
  deps = BuildDependenciesFromFiles(paths + inputs)
  search_hash = BuildDependencyHashFromDependencies(deps)
  result_list = []
  seen_list = []
  for input_file in inputs:
    if IsNamespace(input_file):
      # Inputs of the form 'ns:some.namespace' are translated to the file
      # that provides that namespace before being processed like any file.
      namespace = re.search(ns_regex, input_file).group(1)
      if namespace not in search_hash:
        raise Exception('Invalid namespace (%s)' % namespace)
      input_file = search_hash[namespace].filename
    if not IsValidFile(input_file) or not IsJsFile(input_file):
      raise Exception('Invalid file (%s)' % input_file)
    seen_list.append(input_file)
    file_handle = open(input_file, 'r')
    try:
      # Each goog.require in the input triggers a recursive resolution that
      # appends dependencies (depth-first) to result_list before the input.
      for line in file_handle:
        if re.match(req_regex, line):
          require = re.search(req_regex, line).group(1)
          ResolveDependencies(require, search_hash, result_list, seen_list)
    finally:
      file_handle.close()
    # The input itself goes after everything it requires.
    result_list.append(input_file)

  # All files depend on base.js, so put it first.
  base_js_path = FindClosureBasePath(paths)
  if base_js_path:
    result_list.insert(0, base_js_path)
  else:
    logging.warning('Closure Library base.js not found.')

  return result_list
def FindClosureBasePath(paths):
  """Given a list of file paths, return Closure base.js path, if any.

  Args:
    paths: A list of paths.

  Returns:
    The path to Closure's base.js file including filename, if found;
    otherwise None (implicitly).
  """
  for path in paths:
    if os.path.basename(path) == 'base.js':
      is_base = False
      # Sanity check that this is the Closure base file. Check that this
      # is where goog is defined. A context manager guarantees the handle
      # is closed even if reading raises (the original leaked it on error).
      with open(path) as f:
        for line in f:
          if line.startswith('var goog = goog || {};'):
            is_base = True
            break
      if is_base:
        return path
def ResolveDependencies(require, search_hash, result_list, seen_list):
  """Takes a given requirement and resolves all of the dependencies for it.

  Description:
    A given requirement may require other dependencies. This method
    recursively resolves all dependencies for the given requirement.

  Raises:
    Exception: when require does not exist in the search_hash.

  Args:
    require: the namespace to resolve dependencies for.
    search_hash: the data structure used for resolving dependencies.
    result_list: a list of filenames that have been calculated as dependencies.
      This variable is the output for this function.
    seen_list: a list of filenames that have been 'seen'. This is required
      for the dependency->dependant ordering.
  """
  dep = search_hash.get(require)
  if dep is None:
    raise Exception('Missing provider for (%s)' % require)

  # Guard clause: a file already seen has been (or is being) resolved.
  if dep.filename in seen_list:
    return
  seen_list.append(dep.filename)
  for sub_require in dep.requires:
    ResolveDependencies(sub_require, search_hash, result_list, seen_list)
  result_list.append(dep.filename)
def GetDepsLine(dep, base_path):
  """Returns a JS string for a dependency statement in the deps.js file.

  Args:
    dep: The dependency that we're printing.
    base_path: The path to Closure's base.js including filename.
  """
  relative_path = GetRelpath(dep.filename, base_path)
  return 'goog.addDependency("%s", %s, %s);' % (
      relative_path, dep.provides, dep.requires)
def GetRelpath(path, start):
  """Return a relative path to |path| from |start|."""
  # NOTE: Python 2.6 provides os.path.relpath, which has almost the same
  # functionality as this function. Since we want to support 2.4, we have
  # to implement it manually. :(
  path_parts = os.path.abspath(os.path.normpath(path)).split(os.sep)
  start_parts = os.path.abspath(
      os.path.normpath(os.path.dirname(start))).split(os.sep)

  # Count how many leading components the two absolute paths share.
  common_prefix_count = 0
  for path_part, start_part in zip(path_parts, start_parts):
    if path_part != start_part:
      break
    common_prefix_count += 1

  # Always use forward slashes, because this will get expanded to a url,
  # not a file path.
  ups = ['..'] * (len(start_parts) - common_prefix_count)
  return '/'.join(ups + path_parts[common_prefix_count:])
def PrintLine(msg, out):
  """Write msg to out, terminated with a newline."""
  out.write('%s\n' % msg)
def PrintDeps(source_paths, deps, out):
  """Print out a deps.js file from a list of source paths.

  Args:
    source_paths: Paths that we should generate dependency info for.
    deps: Paths that provide dependency info. Their dependency info should
      not appear in the deps file.
    out: The output file.

  Returns:
    True on success, false if it was unable to find the base path
    to generate deps relative to.
  """
  base_path = FindClosureBasePath(source_paths + deps)
  if not base_path:
    return False

  PrintLine('// This file was autogenerated by calcdeps.py', out)
  excluded = set(deps)
  for dep in BuildDependenciesFromFiles(source_paths + deps):
    if dep.filename not in excluded:
      PrintLine(GetDepsLine(dep, base_path), out)
  return True
def PrintScript(source_paths, out):
  """Concatenate the contents of source_paths to out.

  Each file's content is preceded by an '// Input N' marker comment.

  Args:
    source_paths: Ordered list of source file paths to concatenate.
    out: A writable file-like object.
  """
  for index, dep in enumerate(source_paths):
    PrintLine('// Input %d' % index, out)
    # A context manager guarantees the handle is closed even if read()
    # raises; the original leaked the handle on error.
    with open(dep, 'r') as f:
      PrintLine(f.read(), out)
def GetJavaVersion():
  """Returns the string for the current version of Java installed."""
  proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE)
  # communicate() drains the pipe while waiting; the original's
  # wait()-then-read() can deadlock if the child fills the stderr
  # pipe buffer before exiting.
  stderr_output = proc.communicate()[1]
  version_line = stderr_output.splitlines()[0]
  return version_regex.search(version_line).group()
def FilterByExcludes(options, files):
  """Filters the given files by the exclusions specified at the command line.

  Args:
    options: The flags to calcdeps.
    files: The files to filter.

  Returns:
    A list of files.
  """
  if options.excludes:
    excluded = set(ExpandDirectories(options.excludes))
  else:
    excluded = set()
  return [f for f in files if f not in excluded]
def GetPathsFromOptions(options):
  """Generates the path files from flag options.

  Args:
    options: The flags to calcdeps.

  Returns:
    A list of files in the specified paths. (strings).
  """
  # Fall back to the current directory when no --path flags were given.
  search_paths = options.paths or ['.']
  expanded = ExpandDirectories(search_paths)
  return FilterByExcludes(options, expanded)
def GetInputsFromOptions(options):
  """Generates the inputs from flag options.

  Args:
    options: The flags to calcdeps.

  Returns:
    A list of inputs (strings).
  """
  inputs = options.inputs
  if not inputs:  # Parse stdin
    logging.info('No inputs specified. Reading from stdin...')
    stripped = [line.strip('\n') for line in sys.stdin.readlines()]
    inputs = filter(None, stripped)

  logging.info('Scanning files...')
  return FilterByExcludes(options, ExpandDirectories(inputs))
def Compile(compiler_jar_path, source_paths, out, flags=None):
  """Prepares command-line call to Closure compiler.

  Args:
    compiler_jar_path: Path to the Closure compiler .jar file.
    source_paths: Source paths to build, in order.
    out: Writable file-like object that receives the compiled output.
    flags: A list of additional flags to pass on to Closure compiler.
  """
  args = ['java', '-jar', compiler_jar_path]
  for path in source_paths:
    args.extend(['--js', path])
  args.extend(flags or [])

  logging.info('Compiling with the following command: %s', ' '.join(args))
  proc = subprocess.Popen(args, stdout=subprocess.PIPE)
  stdoutdata = proc.communicate()[0]

  if proc.returncode != 0:
    logging.error('JavaScript compilation failed.')
    sys.exit(1)
  out.write(stdoutdata)
def main():
  """The entrypoint for this script."""

  logging.basicConfig(format='calcdeps.py: %(message)s', level=logging.INFO)

  usage = 'usage: %prog [options] arg'
  parser = optparse.OptionParser(usage)
  parser.add_option('-i',
                    '--input',
                    dest='inputs',
                    action='append',
                    help='The inputs to calculate dependencies for. Valid '
                    'values can be files, directories, or namespaces '
                    '(ns:goog.net.XhrLite). Only relevant to "list" and '
                    '"script" output.')
  parser.add_option('-p',
                    '--path',
                    dest='paths',
                    action='append',
                    help='The paths that should be traversed to build the '
                    'dependencies.')
  parser.add_option('-d',
                    '--dep',
                    dest='deps',
                    action='append',
                    help='Directories or files that should be traversed to '
                    'find required dependencies for the deps file. '
                    'Does not generate dependency information for names '
                    'provided by these files. Only useful in "deps" mode.')
  parser.add_option('-e',
                    '--exclude',
                    dest='excludes',
                    action='append',
                    help='Files or directories to exclude from the --path '
                    'and --input flags')
  parser.add_option('-o',
                    '--output_mode',
                    dest='output_mode',
                    action='store',
                    default='list',
                    help='The type of output to generate from this script. '
                    'Options are "list" for a list of filenames, "script" '
                    'for a single script containing the contents of all the '
                    'file, "deps" to generate a deps.js file for all '
                    'paths, or "compiled" to produce compiled output with '
                    'the Closure compiler.')
  parser.add_option('-c',
                    '--compiler_jar',
                    dest='compiler_jar',
                    action='store',
                    help='The location of the Closure compiler .jar file.')
  parser.add_option('-f',
                    '--compiler_flag',
                    '--compiler_flags',  # for backwards compatability
                    dest='compiler_flags',
                    action='append',
                    help='Additional flag to pass to the Closure compiler. '
                    'May be specified multiple times to pass multiple flags.')
  parser.add_option('--output_file',
                    dest='output_file',
                    action='store',
                    help=('If specified, write output to this path instead of '
                          'writing to standard output.'))

  (options, args) = parser.parse_args()

  search_paths = GetPathsFromOptions(options)

  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout

  # 'deps' mode short-circuits before input scanning: it only needs the
  # search paths and the optional --dep paths.
  if options.output_mode == 'deps':
    result = PrintDeps(search_paths, ExpandDirectories(options.deps or []), out)
    if not result:
      logging.error('Could not find Closure Library in the specified paths')
      sys.exit(1)
    return

  inputs = GetInputsFromOptions(options)

  logging.info('Finding Closure dependencies...')
  deps = CalculateDependencies(search_paths, inputs)
  output_mode = options.output_mode

  if output_mode == 'script':
    PrintScript(deps, out)
  elif output_mode == 'list':
    # Just print out a dep per line
    for dep in deps:
      PrintLine(dep, out)
  elif output_mode == 'compiled':
    # Make sure a .jar is specified.
    if not options.compiler_jar:
      logging.error('--compiler_jar flag must be specified if --output is '
                    '"compiled"')
      sys.exit(1)

    # User friendly version check.
    if distutils and not (distutils.version.LooseVersion(GetJavaVersion()) >
                          distutils.version.LooseVersion('1.6')):
      logging.error('Closure Compiler requires Java 1.6 or higher.')
      logging.error('Please visit http://www.java.com/getjava')
      sys.exit(1)

    Compile(options.compiler_jar, deps, out, options.compiler_flags)
  else:
    logging.error('Invalid value for --output flag.')
    sys.exit(1)
# Script entry point.
if __name__ == '__main__':
  main()
| |
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import re
from django.db import connection
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase
from django.test.utils import Approximate
from django.test.utils import CaptureQueriesContext
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
    """Exercises QuerySet.aggregate() and annotate() against the
    Author/Publisher/Book/Store rows loaded from the aggregation fixture.

    Every hard-coded expected value below is tied to that fixture data.
    """

    fixtures = ["aggregation.json"]

    def test_empty_aggregate(self):
        """aggregate() with no expressions returns an empty dict."""
        self.assertEqual(Author.objects.all().aggregate(), {})

    def test_single_aggregate(self):
        """A single aggregate is keyed as '<field>__<function>'."""
        vals = Author.objects.aggregate(Avg("age"))
        self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})

    def test_multiple_aggregates(self):
        """Several aggregates can be computed in one call."""
        vals = Author.objects.aggregate(Sum("age"), Avg("age"))
        self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})

    def test_filter_aggregate(self):
        """Aggregation respects a preceding filter()."""
        vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["age__sum"], 254)

    def test_related_aggregate(self):
        """Aggregates can traverse M2M, forward FK and reverse FK relations."""
        vals = Author.objects.aggregate(Avg("friends__age"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)

        vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)

        vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__rating__avg"], 4.0)

        vals = Book.objects.aggregate(Sum("publisher__num_awards"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["publisher__num_awards__sum"], 30)

        vals = Publisher.objects.aggregate(Sum("book__price"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__price__sum"], Decimal("270.27"))

    def test_aggregate_multi_join(self):
        """Aggregates can span more than one join."""
        vals = Store.objects.aggregate(Max("books__authors__age"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["books__authors__age__max"], 57)

        vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__publisher__num_awards__min"], 1)

    def test_aggregate_alias(self):
        """A keyword argument names the aggregate in the result dict."""
        vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)

    def test_annotate_basic(self):
        """annotate() with no args is a no-op; with an aggregate it attaches
        the computed value to each returned instance."""
        self.assertQuerysetEqual(
            Book.objects.annotate().order_by('pk'), [
                "The Definitive Guide to Django: Web Development Done Right",
                "Sams Teach Yourself Django in 24 Hours",
                "Practical Django Projects",
                "Python Web Development with Django",
                "Artificial Intelligence: A Modern Approach",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
            ],
            lambda b: b.name
        )

        books = Book.objects.annotate(mean_age=Avg("authors__age"))
        b = books.get(pk=1)
        self.assertEqual(
            b.name,
            'The Definitive Guide to Django: Web Development Done Right'
        )
        self.assertEqual(b.mean_age, 34.5)

    def test_annotate_m2m(self):
        """Annotations over an M2M relation, with default and explicit aliases."""
        books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 51.5),
                ('Practical Django Projects', 29.0),
                ('Python Web Development with Django', Approximate(30.3, places=1)),
                ('Sams Teach Yourself Django in 24 Hours', 45.0)
            ],
            lambda b: (b.name, b.authors__age__avg),
        )

        books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ],
            lambda b: (b.name, b.num_authors)
        )

    def test_backwards_m2m_annotate(self):
        """Annotations over the reverse side of an M2M relation."""
        authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 4.5),
                ('Brad Dayley', 3.0),
                ('Jacob Kaplan-Moss', 4.5),
                ('James Bennett', 4.0),
                ('Paul Bissex', 4.0),
                ('Stuart Russell', 4.0)
            ],
            lambda a: (a.name, a.book__rating__avg)
        )

        authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 1),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 1),
                ('Peter Norvig', 2),
                ('Stuart Russell', 1),
                ('Wesley J. Chun', 1)
            ],
            lambda a: (a.name, a.num_books)
        )

    def test_reverse_fkey_annotate(self):
        """Annotations over forward and reverse foreign keys."""
        books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 7),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
                ('Practical Django Projects', 3),
                ('Python Web Development with Django', 7),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 3)
            ],
            lambda b: (b.name, b.publisher__num_awards__sum)
        )

        publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
        self.assertQuerysetEqual(
            publishers, [
                ('Apress', Decimal("59.69")),
                ("Jonno's House of Books", None),
                ('Morgan Kaufmann', Decimal("75.00")),
                ('Prentice Hall', Decimal("112.49")),
                ('Sams', Decimal("23.09"))
            ],
            lambda p: (p.name, p.book__price__sum)
        )

    def test_annotate_values(self):
        """Interaction of annotate() with values()/values_list(), in either
        order, and grouping behavior when values() precedes annotate()."""
        books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
        self.assertEqual(
            books, [
                {
                    "contact_id": 1,
                    "id": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": 1,
                    "rating": 4.5,
                }
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
        self.assertEqual(
            list(books), [
                {
                    "pk": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                }
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
        self.assertEqual(
            list(books), [
                {
                    "name": "The Definitive Guide to Django: Web Development Done Right"
                }
            ]
        )

        books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
        self.assertEqual(
            list(books), [
                {
                    "contact_id": 1,
                    "id": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": 1,
                    "rating": 4.5,
                }
            ]
        )

        # values() before annotate() groups by the selected fields.
        books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
        self.assertEqual(
            list(books), [
                {
                    "rating": 3.0,
                    "n_authors": 1,
                    "mean_age": 45.0,
                },
                {
                    "rating": 4.0,
                    "n_authors": 6,
                    "mean_age": Approximate(37.16, places=1)
                },
                {
                    "rating": 4.5,
                    "n_authors": 2,
                    "mean_age": 34.5,
                },
                {
                    "rating": 5.0,
                    "n_authors": 1,
                    "mean_age": 57.0,
                }
            ]
        )

        authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
        self.assertEqual(len(authors), 9)
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 32.0),
                ('Brad Dayley', None),
                ('Jacob Kaplan-Moss', 29.5),
                ('James Bennett', 34.0),
                ('Jeffrey Forcier', 27.0),
                ('Paul Bissex', 31.0),
                ('Peter Norvig', 46.0),
                ('Stuart Russell', 57.0),
                ('Wesley J. Chun', Approximate(33.66, places=1))
            ],
            lambda a: (a.name, a.friends__age__avg)
        )

    def test_count(self):
        """Count() with and without distinct=True."""
        vals = Book.objects.aggregate(Count("rating"))
        self.assertEqual(vals, {"rating__count": 6})

        vals = Book.objects.aggregate(Count("rating", distinct=True))
        self.assertEqual(vals, {"rating__count": 4})

    def test_fkey_aggregate(self):
        """Counting 'book__id' and 'book' over an FK are equivalent."""
        explicit = list(Author.objects.annotate(Count('book__id')))
        implicit = list(Author.objects.annotate(Count('book')))
        self.assertEqual(explicit, implicit)

    def test_annotate_ordering(self):
        """order_by() may reference an annotation, ascending or descending."""
        books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
        self.assertEqual(
            list(books), [
                {
                    "rating": 4.5,
                    "oldest": 35,
                },
                {
                    "rating": 3.0,
                    "oldest": 45
                },
                {
                    "rating": 4.0,
                    "oldest": 57,
                },
                {
                    "rating": 5.0,
                    "oldest": 57,
                }
            ]
        )

        books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
        self.assertEqual(
            list(books), [
                {
                    "rating": 5.0,
                    "oldest": 57,
                },
                {
                    "rating": 4.0,
                    "oldest": 57,
                },
                {
                    "rating": 3.0,
                    "oldest": 45,
                },
                {
                    "rating": 4.5,
                    "oldest": 35,
                }
            ]
        )

    def test_aggregate_annotation(self):
        """An aggregate can be computed over a previous annotation."""
        vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
        self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})

    def test_filtering(self):
        """filter() conditions interact with annotations depending on whether
        they appear before or after the annotate() call."""
        p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
        Book.objects.create(
            name='ExpensiveBook1',
            pages=1,
            isbn='111',
            rating=3.5,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 1)
        )
        Book.objects.create(
            name='ExpensiveBook2',
            pages=1,
            isbn='222',
            rating=4.0,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 2)
        )
        Book.objects.create(
            name='ExpensiveBook3',
            pages=1,
            isbn='333',
            rating=4.5,
            price=Decimal("35"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 3)
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Apress",
                "Sams",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        # Filtering on price BEFORE annotating restricts the rows counted.
        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Sams",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
        self.assertEqual(len(publishers), 0)

    def test_annotation(self):
        """Annotations can be filtered on, alone or combined with field
        lookups."""
        vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
        self.assertEqual(vals, {"friends__id__count": 2})

        books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
        self.assertQuerysetEqual(
            books, [
                "The Definitive Guide to Django: Web Development Done Right",
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )

        authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
        self.assertQuerysetEqual(
            authors, [
                "Brad Dayley",
            ],
            lambda a: a.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )

        books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
        self.assertQuerysetEqual(
            books, [
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )

    def test_more_aggregation(self):
        """Chaining annotate(), filter() and aggregate() in one query."""
        a = Author.objects.get(name__contains='Norvig')
        b = Book.objects.get(name__contains='Done Right')
        b.authors.add(a)
        b.save()

        vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
        self.assertEqual(vals, {"rating__avg": 4.25})

    def test_even_more_aggregate(self):
        """Annotations combined with exclude()/order_by()/values(), and
        aggregates over date/time fields."""
        publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
        self.assertEqual(
            list(publishers), [
                {
                    'earliest_book': datetime.date(1991, 10, 15),
                    'num_awards': 9,
                    'id': 4,
                    'name': 'Morgan Kaufmann'
                },
                {
                    'earliest_book': datetime.date(1995, 1, 15),
                    'num_awards': 7,
                    'id': 3,
                    'name': 'Prentice Hall'
                },
                {
                    'earliest_book': datetime.date(2007, 12, 6),
                    'num_awards': 3,
                    'id': 1,
                    'name': 'Apress'
                },
                {
                    'earliest_book': datetime.date(2008, 3, 3),
                    'num_awards': 1,
                    'id': 2,
                    'name': 'Sams'
                }
            ]
        )

        vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            }
        )

    def test_annotate_values_list(self):
        """values_list() can select annotations, flat or as tuples."""
        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
        self.assertEqual(
            list(books), [
                (1, "159059725", 34.5),
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
        self.assertEqual(
            list(books), [
                ('159059725',)
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
        self.assertEqual(
            list(books), [
                (34.5,)
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
        self.assertEqual(list(books), [34.5])

        books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
        self.assertEqual(
            list(books), [
                (Decimal("29.69"), 2),
                (Decimal('23.09'), 1),
                (Decimal('30'), 1),
                (Decimal('75'), 1),
                (Decimal('82.8'), 1),
            ]
        )

    def test_dates_with_aggregation(self):
        """
        Test that .dates() returns a distinct set of dates when applied to a
        QuerySet with aggregation.

        Refs #18056. Previously, .dates() would return distinct (date_kind,
        aggregation) sets, in this case (year, num_authors), so 2008 would be
        returned twice because there are books from 2008 with a different
        number of authors.
        """
        dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
        self.assertQuerysetEqual(
            dates, [
                "datetime.date(1991, 1, 1)",
                "datetime.date(1995, 1, 1)",
                "datetime.date(2007, 1, 1)",
                "datetime.date(2008, 1, 1)"
            ]
        )

    def test_values_aggregation(self):
        """Aggregation over a values() queryset. Refs #20782."""
        # Refs #20782
        max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
        self.assertEqual(max_rating['max_rating'], 5)
        max_books_per_rating = Book.objects.values('rating').annotate(
            books_per_rating=Count('id')
        ).aggregate(Max('books_per_rating'))
        self.assertEqual(
            max_books_per_rating,
            {'books_per_rating__max': 3})

    def test_ticket17424(self):
        """
        Check that doing exclude() on a foreign model after annotate()
        doesn't crash.
        """
        all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
        annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))

        # The value doesn't matter, we just need any negative
        # constraint on a related model that's a noop.
        excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")

        # Try to generate query tree
        str(excluded_books.query)

        self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)

        # Check internal state
        self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
        self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)

    def test_ticket12886(self):
        """
        Check that aggregation over sliced queryset works correctly.
        """
        qs = Book.objects.all().order_by('-rating')[0:3]
        vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
        self.assertAlmostEqual(vals, 4.5, places=2)

    def test_ticket11881(self):
        """
        Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
        or select_related() stuff.
        """
        qs = Book.objects.all().select_for_update().order_by(
            'pk').select_related('publisher').annotate(max_pk=Max('pk'))
        with CaptureQueriesContext(connection) as captured_queries:
            qs.aggregate(avg_pk=Avg('max_pk'))
            self.assertEqual(len(captured_queries), 1)
            qstr = captured_queries[0]['sql'].lower()
            self.assertNotIn('for update', qstr)
            forced_ordering = connection.ops.force_no_ordering()
            if forced_ordering:
                # If the backend needs to force an ordering we make sure it's
                # the only "ORDER BY" clause present in the query.
                self.assertEqual(
                    re.findall(r'order by (\w+)', qstr),
                    [', '.join(forced_ordering).lower()]
                )
            else:
                self.assertNotIn('order by', qstr)
            self.assertEqual(qstr.count(' join '), 0)
| |
#!/usr/bin/env python2
import argparse
import subprocess
import shlex
import re
import os
import sys
import json
import hashlib
from collections import defaultdict, OrderedDict
import time
from StringIO import StringIO
import webbrowser
from distutils.version import LooseVersion
import logging
import paramiko
logging.basicConfig()
log = logging.getLogger('cstar_docker')
log.setLevel(logging.DEBUG)
from fabric import api as fab
from fabric.contrib.files import append as fab_append
from cstar_perf.tool import fab_deploy
from fabric.tasks import execute as fab_execute
import tasks
CONTAINER_DEFAULT_MEMORY = '2G'
fab.env.user = 'cstar'
docker_image_name = 'datastax/cstar_docker'
# Dockerfile for cstar_perf, there are string format parameters in here:
# ssh_pub_key - the text of the ssh public key
#
dockerfile = """
FROM ubuntu:latest
MAINTAINER Ryan McGuire <ryan@datastax.com>
RUN \
apt-get update && \
apt-get -y upgrade && \
apt-get install -y \
build-essential \
software-properties-common \
git \
unzip \
python \
python-dev \
python-pip \
openssh-server \
libssl-dev \
ant \
libjna-java \
psmisc \
python-software-properties \
libjpeg-dev \
lxc
RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections && \
add-apt-repository ppa:webupd8team/java && \
apt-get update && \
apt-get install -y \
oracle-java8-installer \
oracle-java7-installer \
oracle-java8-set-default
# Download and compile cassandra, we don't use this verison, but what
# this does is provide a git cache and primes the ~/.m2 directory to
# speed things up:
RUN groupadd -g 999 docker
RUN useradd -ms /bin/bash -G docker cstar
USER cstar
RUN git clone http://github.com/apache/cassandra.git ~/.docker_cassandra.git
RUN cd ~/.docker_cassandra.git && \
JAVA_TOOL_OPTIONS=-Dfile.encoding=UTF8 ant clean jar
USER root
#### Setup SSH
RUN mkdir /var/run/sshd && \
echo 'root:root' | chpasswd && \
sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config && \
`### SSH login fix. Otherwise user is kicked off after login` && \
sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
ENV NOTVISIBLE "in users profile"
RUN echo "export VISIBLE=now" >> /etc/profile
RUN mkdir -p /home/cstar/.ssh && \
chmod 700 /home/cstar/.ssh && \
echo '{ssh_pub_key}' > /home/cstar/.ssh/authorized_keys && \
ssh-keygen -P '' -f /home/cstar/.ssh/id_rsa && \
cat /home/cstar/.ssh/id_rsa.pub >> /home/cstar/.ssh/authorized_keys && \
chmod 600 /home/cstar/.ssh/authorized_keys && \
echo 'Host *' > /home/cstar/.ssh/config && \
echo ' StrictHostKeyChecking no' >> /home/cstar/.ssh/config &&\
echo ' UserKnownHostsFile=/dev/null' >> /home/cstar/.ssh/config && \
chown -R cstar:cstar /home/cstar/.ssh
RUN mkdir -p /root/.ssh && \
chmod 700 /root/.ssh && \
cp /home/cstar/.ssh/authorized_keys /root/.ssh/authorized_keys && \
cp /home/cstar/.ssh/id_rsa /root/.ssh/id_rsa && \
cp /home/cstar/.ssh/config /root/.ssh/config
RUN mkdir -p /home/cstar/git/cstar_perf && \
chown -R cstar:cstar /home/cstar/git && \
mkdir -p /data/cstar_perf && \
chown -R cstar:cstar /data
VOLUME ["/home/cstar/git/cstar_perf"]
RUN echo "%wheel ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
groupadd wheel && \
gpasswd -a cstar wheel
### Expose SSH and Cassandra ports
EXPOSE 22 7000 7001 7199 9042 9160 61620 61621
RUN pip install supervisor
RUN echo "[unix_http_server]" > /supervisord.conf && \
echo "file=/tmp/supervisor.sock" >> /supervisord.conf && \
echo "" >> /supervisord.conf && \
echo "[supervisord]" >> /supervisord.conf && \
echo "logfile=/tmp/supervisord.log " >> /supervisord.conf && \
echo "logfile_maxbytes=50MB " >> /supervisord.conf && \
echo "logfile_backups=10 " >> /supervisord.conf && \
echo "loglevel=info " >> /supervisord.conf && \
echo "pidfile=/tmp/supervisord.pid " >> /supervisord.conf && \
echo "nodaemon=false " >> /supervisord.conf && \
echo "minfds=1024 " >> /supervisord.conf && \
echo "minprocs=200 " >> /supervisord.conf && \
echo "" >> /supervisord.conf && \
echo "[rpcinterface:supervisor]" >> /supervisord.conf && \
echo "supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface" >> /supervisord.conf && \
echo "" >> /supervisord.conf && \
echo "" >> /supervisord.conf && \
echo "[supervisorctl]" >> /supervisord.conf && \
echo "serverurl=unix:///tmp/supervisor.sock " >> /supervisord.conf && \
echo "" >> /supervisord.conf && \
echo "[program:sshd]" >> /supervisord.conf && \
echo "command=/usr/sbin/sshd -D" >> /supervisord.conf && \
echo "user=root" >> /supervisord.conf && \
echo "autostart=true" >> /supervisord.conf && \
echo "autorestart=true" >> /supervisord.conf && \
echo "redirect_stderr=true" >> /supervisord.conf
CMD ["supervisord", "-n", "-c", "/supervisord.conf"]
"""
def check_docker_version(expected_version='1.6.0'):
    """Verify docker is installed and at least ``expected_version``.

    Raises AssertionError when docker cannot be run, its version output
    cannot be parsed, or the installed version is too old.
    """
    version_cmd = shlex.split("docker --version")
    try:
        p = subprocess.Popen(version_cmd, stdout=subprocess.PIPE)
        version_string = p.communicate()[0]
    except OSError:
        raise AssertionError('Failed to run docker, it may not be installed?')
    m = re.match('Docker version ([^,]+), .*', version_string)
    if not m:
        # BUG FIX: an unparsable version string used to be silently
        # accepted; fail loudly so version problems are not masked.
        raise AssertionError(
            'Could not parse docker version from: {}'.format(version_string))
    version = m.groups()[0]
    if LooseVersion(version) < expected_version:
        raise AssertionError(
            'Found docker version {}. This tool requires version {}+'.format(
                version, expected_version))
def get_dockerfile():
    """Render the module's Dockerfile template with the tool's public SSH key."""
    pub_key_path = get_ssh_key_pair()[1]
    with open(pub_key_path) as key_file:
        pub_key = key_file.read().strip()
    return dockerfile.format(ssh_pub_key=pub_key)
def build_docker_image(tag=docker_image_name, force=False):
    """Build the cstar_perf docker image from the in-memory Dockerfile.

    When ``force`` is set, any existing image is removed first and the
    build runs with --no-cache.  On success, a sha256 hash of the rendered
    Dockerfile is recorded so check_if_build_necessary() can detect
    template changes later.
    """
    if force:
        # BUG FIX: the rmi command previously had a stray trailing '-'
        # argument, which made docker also try to remove an image named '-'.
        rmi_cmd = shlex.split("docker rmi -f {}".format(tag))
        log.info('Removing docker image...')
        subprocess.call(rmi_cmd)
    # Trailing '-' is intentional here: it tells docker build to read the
    # Dockerfile from stdin.
    build_cmd = shlex.split("docker build -t {} {} -".format(tag, '--no-cache' if force else ''))
    p = subprocess.Popen(build_cmd, stdin=subprocess.PIPE)
    rendered_dockerfile = get_dockerfile()  # render once, reuse for the hash
    p.communicate(rendered_dockerfile)
    if p.returncode == 0:
        # Save the hash of the dockerfile so we can know if we need to
        # rebuild the image:
        hash_path = os.path.join(os.path.expanduser("~"), ".cstar_perf", "cstar_docker_image_hash")
        with open(hash_path, 'w') as f:
            f.write(hashlib.sha256(rendered_dockerfile).hexdigest())
def check_if_build_necessary(exit_if_not_ready=True):
    """Compare the stored Dockerfile hash against the current template.

    A rebuild is needed when the rendered Dockerfile changed since the
    last image build, or when no hash was ever recorded.  Optionally
    exits with an error message telling the user to rebuild.
    """
    latest_hash = hashlib.sha256(get_dockerfile()).hexdigest()
    hash_file = os.path.join(os.path.expanduser("~"), ".cstar_perf", "cstar_docker_image_hash")
    try:
        with open(hash_file) as f:
            stored_hash = f.read().strip()
    except IOError:
        needs_rebuild = True
    else:
        needs_rebuild = (latest_hash != stored_hash)
    if needs_rebuild and exit_if_not_ready:
        print("The Dockerfile has changed since you last built the image. You must rebuild your image:")
        print(" cstar_docker build")
        exit(1)
def get_container_data(container):
    """Return `docker inspect` metadata (a dict) for a container or image.

    Raises AssertionError when docker knows no such container/image.
    """
    inspect_cmd = shlex.split("docker inspect {}".format(container))
    p = subprocess.Popen(inspect_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = p.communicate()[0]
    try:
        return json.loads(output)[0]
    except (IndexError, ValueError):
        # BUG FIX: docker prints '[]' (-> IndexError) for an unknown id,
        # but can also print nothing at all, which made json.loads raise
        # an uncaught ValueError instead of this AssertionError.
        raise AssertionError('No docker container or image with id: {}'.format(container))
def get_ssh_key_pair():
    """Create a cstar_docker specific SSH key, or return the previously generated one"""
    key_path = os.path.join(os.path.expanduser("~"), ".cstar_perf", "cstar_docker_key")
    pub_key_path = key_path + '.pub'
    if not (os.path.exists(key_path) and os.path.exists(pub_key_path)):
        try:
            os.mkdir(os.path.join(os.path.expanduser("~"), ".cstar_perf"))
        except OSError:
            # BUG FIX: os.mkdir raises OSError (not IOError) when the
            # directory already exists; the old 'except IOError' never
            # fired and the call crashed on an existing directory.
            pass
        key = paramiko.rsakey.RSAKey.generate(2048)
        with open(key_path, 'w') as f:
            key.write_private_key(f)
        with open(pub_key_path, 'w') as f:
            f.write("ssh-rsa ")
            f.write(key.get_base64())
            f.write(" cstar_docker generated {}".format(time.ctime()))
            f.write("\n")
        # 0o600 keeps the keys private; the explicit-prefix octal literal is
        # valid on both Python 2.6+ and Python 3, unlike the old 0600 form.
        os.chmod(key_path, 0o600)
        os.chmod(pub_key_path, 0o600)
    return (key_path, pub_key_path)
def get_clusters(cluster_regex='all', all_metadata=False):
    """Get all clusters matching the cluster name regex.

    Returns a list of names, unless all_metadata=True, then a map of
    all container inspection data is returned.
    """
    # Anchor the regex so e.g. 'foo' does not also match 'foo2':
    cluster_regex = cluster_regex + ("" if cluster_regex.endswith("$") else "$")
    clusters = defaultdict(list) # {cluster_name : [first_node_metadata, 2nd...], ...}
    cluster_nodes = defaultdict(list)  # {cluster_name: [container_name, ...]}
    p = subprocess.Popen(shlex.split("docker ps -aq"), stdout=subprocess.PIPE)
    containers = p.communicate()[0].strip()
    # Local sentinel used only to bail out of the parsing below when
    # docker reports no containers at all:
    class NoContainersException(Exception):
        pass
    try:
        if containers == '':
            raise NoContainersException
        containers = containers.split('\n')
        for container in containers:
            data = get_container_data(container)
            try:
                labels = data['Config']['Labels']
                # Only consider containers created by this tool:
                if labels['cstar_node'] == 'true':
                    container_name = data['Name'] = data['Name'].lstrip('/')
                    # Normalize the node label to an int (stored back into
                    # the metadata so later sorting is numeric):
                    node_num = labels['node'] = int(labels['node'])
                    if cluster_regex.lower() == 'all$' or re.match(cluster_regex, labels['cluster_name']):
                        clusters[labels['cluster_name']].append(data)
                        cluster_nodes[labels['cluster_name']].append(container_name)
            except KeyError:
                # Not a cstar_perf container; skip it.
                pass
    except NoContainersException:
        pass
    # Sort cluster lists by node number:
    for cluster_name, cluster_data in clusters.items():
        cluster_data.sort(key=lambda x:x['Config']['Labels']['node'])
        # spot check for inconsistencies:
        cluster_types = set([x['Config']['Labels']['cluster_type'] for x in cluster_data])
        assert len(cluster_types) == 1, "{} has more than one cluster_type: {}".format(cluster_name, cluster_types)
    for cluster_name, nodes in cluster_nodes.items():
        nodes.sort()
    if all_metadata:
        return clusters
    else:
        return cluster_nodes
def get_ips(cluster_name):
    """Return ((container_name, ip), ...) for every node of the cluster."""
    nodes = get_clusters(cluster_name, all_metadata=True)[cluster_name]
    return tuple((node['Name'], node['NetworkSettings']['IPAddress'])
                 for node in nodes)
def check_cluster_exists(cluster_regex):
    """Return True when at least one cluster matches the given regex."""
    return bool(get_clusters(cluster_regex))
def launch(num_nodes, cluster_name='cnode', destroy_existing=False,
           install_tool=True, frontend=False, mount_host_src=False, verbose=False,
           client_double_duty=False):
    """Launch cluster nodes, return metadata (ip addresses etc) for the nodes

    num_nodes          - number of Cassandra nodes requested.
    cluster_name       - docker label / container name prefix for the cluster.
    destroy_existing   - tear down a same-named cluster first.
    install_tool       - install cstar_perf.tool after the containers start.
    frontend           - launch a single-node web frontend instead of a cluster.
    mount_host_src     - bind-mount the host's cstar_perf git checkout.
    client_double_duty - run Cassandra on node 00 in addition to the client.
    """
    assert num_nodes > 0, "Cannot start a cluster with {} nodes".format(num_nodes)
    if frontend:
        assert num_nodes == 1 and client_double_duty, "Can only start a frontend with a single node"
    cluster_type = 'frontend' if frontend else 'cluster'
    # Make sure the docker image exists and is current before launching:
    try:
        get_container_data(docker_image_name)
    except AssertionError:
        print("The docker image {} was not found, build the docker image first "
              "with: 'cstar_docker build'".format(docker_image_name))
        exit(1)
    check_if_build_necessary()
    existing_nodes = get_clusters(cluster_name)
    if len(existing_nodes):
        if destroy_existing:
            destroy(cluster_name)
        else:
            log.error('Cannot launch cluster \'{}\' as it already exists.'.format(cluster_name))
            log.error('You must destroy the existing cluster, or use --destroy-existing '
                      'in your launch command')
            exit(1)
    first_cassandra_node = 1
    if client_double_duty:
        first_cassandra_node = 0
        log.info('Launching a {} node cluster...'.format(num_nodes))
    else:
        # We need one more node than requested to run the client
        num_nodes += 1
        log.info('Launching a {} node cluster with a separate client node ...'.format(num_nodes))
    node_data = OrderedDict()
    for i in range(num_nodes):
        node_name = "%s_%02d" % (cluster_name,i)
        ssh_path = os.path.split(get_ssh_key_pair()[0])[0]
        # Containers are tagged with labels (cstar_node / cluster_name /
        # cluster_type / node) that get_clusters() later uses to discover
        # them; a frontend additionally publishes port 8000 on localhost.
        run_cmd = ('docker run --ulimit memlock=100000000:100000000 --privileged --label cstar_node=true --label '
                   'cluster_name={cluster_name} --label cluster_type={cluster_type} --label node={node_num} '
                   ' -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/bin/docker '
                   '-d -m {CONTAINER_DEFAULT_MEMORY} --name={node_name} {port_settings} -h {node_name}'.format(
                       cluster_name=cluster_name, node_num=i, node_name=node_name, cluster_type=cluster_type,
                       CONTAINER_DEFAULT_MEMORY=CONTAINER_DEFAULT_MEMORY, ssh_path=ssh_path,
                       port_settings="-p 127.0.0.1:8000:8000" if frontend else ""))
        if mount_host_src:
            # Try to find the user's git clone of cstar_perf:
            candidates = [
                # Get the directory relative to this file - only works
                # if user installed in-place (pip install -e)
                os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir)),
                # In the current directory:
                os.getcwd()
            ]
            for d in candidates:
                if os.path.exists(os.path.join(d, '.git')) and \
                   os.path.exists(os.path.join(d, 'tool')) and \
                   os.path.exists(os.path.join(d, 'frontend')):
                    cstar_dir = d
                    break
            else:
                # for/else: no candidate looked like a cstar_perf checkout.
                log.error("Could not mount your git checkout of cstar_perf because none could be found. Try installing cstar_perf in developer mode: 'pip install -e ./tool' or try running cstar_docker from the same directory as your checkout")
                exit(1)
            run_cmd = run_cmd + " -v {cstar_dir}:/home/cstar/git/cstar_perf".format(cstar_dir=cstar_dir)
        run_cmd = run_cmd + ' ' + docker_image_name
        log.debug(run_cmd)
        p=subprocess.Popen(shlex.split(run_cmd),
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        container_id = p.communicate()[0].strip()
        node_data[node_name] = get_container_data(container_id)
    hosts = OrderedDict()
    for name, data in node_data.items():
        hosts[name] = data['NetworkSettings']['IPAddress']
    # Write /etc/hosts
    with fab.settings(hosts=[n for n in hosts.values()]):
        fab_execute(fab_deploy.setup_hosts_file, hosts)
    if frontend:
        log.info("Installing cstar_perf.frontend ... ")
        __install_cstar_perf_frontend(cluster_name, hosts, mount_host_src=mount_host_src)
    elif install_tool:
        log.info("Installing cstar_perf.tool ... ")
        __install_cstar_perf_tool(cluster_name, hosts, mount_host_src=mount_host_src,
                                  first_cassandra_node=first_cassandra_node)
    if verbose:
        print("Started {} nodes:".format(num_nodes))
        print("")
        info(cluster_name)
    return node_data
def __install_cstar_perf_frontend(cluster_name, hosts, mount_host_src=False):
    """Install and boot the cstar_perf.frontend web service on a node.

    ``hosts`` maps container name -> ip and must contain exactly one entry.
    """
    assert len(hosts) == 1, "Cannot install frontend onto more than one node"
    host, ip = hosts.popitem()
    with fab.settings(hosts=ip):
        # Setup cstar_perf.tool, not normally needed on the frontend, but we'll use it to
        # easily bootstrap the frontend's C* backend:
        fab_execute(fab_deploy.setup_fab_dir)
    __install_cstar_perf_tool(cluster_name, {host:ip}, mount_host_src=mount_host_src, first_cassandra_node=0)
    # Setup C* and add it to the supervisor to start on boot:
    def setup_cassandra():
        # Pin the frontend's own C* to loopback so container restarts
        # (which change docker-assigned IPs) don't break it:
        __update_node_ip_addresses(cluster_name, static_ips={host:'127.0.0.1'})
        fab.run("cstar_perf_bootstrap -v cassandra-2.1.8")
    with fab.settings(hosts=ip):
        fab_execute(setup_cassandra)
    def setup_boot_items():
        # Supervisor program entries so Cassandra, the notification daemon
        # and the frontend server start automatically with the container:
        boot_items = "\n".join([
            '',
            '[program:cassandra]',
            'command=/home/cstar/fab/cassandra/bin/cassandra -f',
            'priority=1',
            'user=cstar',
            'autostart=true',
            'autorestart=false',
            'redirect_stderr=true',
            '',
            '[program:cstar_perf_notifications]',
            'command=cstar_perf_notifications -F',
            'priority=1',
            'user=cstar',
            'autostart=true',
            'autorestart=true',
            'startretries=30',
            'redirect_stderr=true',
            '',
            '[program:cstar_perf_server]',
            'command=cstar_perf_server',
            'priority=2',
            'user=cstar',
            'environment=HOME=/home/cstar',
            'autostart=true',
            'startretries=30',
            'autorestart=true',
            'redirect_stderr=true',
            ''
        ])
        fab_append("/supervisord.conf", boot_items)
    with fab.settings(hosts=ip, user="root"):
        fab_execute(setup_boot_items)
        # Install the frontend as well as Cassandra to hold the frontend DB
        fab_execute(fab_deploy.install_cstar_perf_frontend)
    # Generate and save the credentials
    with fab.settings(hosts=ip):
        fab_execute(fab_deploy.generate_frontend_credentials)
    # Restart the container so all the auto boot stuff is applied:
    subprocess.call(shlex.split("docker restart {}".format(host)))
    # Post Restart setup
    frontend_name, frontend_ip = get_ips(cluster_name)[0]
    with fab.settings(hosts=frontend_ip):
        fab_execute(fab_deploy.create_default_frontend_users)
    log.info("cstar_perf service started, opening in your browser: http://localhost:8000")
    webbrowser.open("http://localhost:8000")
    log.info("Log in with email: admin@example.com and password: admin")
    log.info("You will need to use the 'cstar_docker associate' command to link up a cluster")
def __install_cstar_perf_tool(cluster_name, hosts, mount_host_src=False, first_cassandra_node=None):
    """Install cstar_perf.tool across the cluster and write its config.

    ``hosts`` is an OrderedDict of {container_name: ip}.

    NOTE(review): hosts.values()[0] and hosts.items()[...] below rely on
    Python 2 dict methods returning lists; this module is Python 2 only.
    """
    first_node = hosts.values()[0]
    other_nodes = hosts.values()[1:]
    if first_cassandra_node is None:
        # If a first cluster node was not explicitly set, assume we
        # mean the second node of the cluster, unless it's a single
        # node cluster, then it's node 0.
        if len(hosts) > 1:
            first_cassandra_node = 1
        else:
            first_cassandra_node = 0
    # Create the cluster config file
    cluster_config = {
        "block_devices": [],
        "blockdev_readahead": None,
        "hosts": {
            host : {
                "hostname": host,
                "internal_ip": ip,
                "external_ip": ip,
                "seed": True,
                "datacenter": 'dc1'
            # Nodes before first_cassandra_node are client-only and are
            # excluded from the Cassandra host list:
            } for host, ip in hosts.items()[first_cassandra_node:]
        },
        "name": cluster_name,
        "stress_node": first_node,
        "user":"cstar",
        "data_file_directories": ['/data/cstar_perf/data'],
        "commitlog_directory": '/data/cstar_perf/commitlog',
        "saved_caches_directory": '/data/cstar_perf/saved_caches',
        "docker": True
    }
    with fab.settings(hosts=first_node):
        fab_execute(fab_deploy.copy_cluster_config, cluster_config)
    # Setup ~/fab directory (java, ant, stress, etc) on the first node
    with fab.settings(hosts=first_node):
        fab_execute(fab_deploy.setup_fab_dir)
        # Install cstar_perf
        fab_execute(fab_deploy.install_cstar_perf_tool)
        # Install cstar_perf.frontend
        fab_execute(fab_deploy.install_cstar_perf_frontend)
    # rsync ~/fab to the other nodes:
    if len(other_nodes) > 0:
        with fab.settings(hosts=other_nodes):
            fab_execute(fab_deploy.copy_fab_dir, first_node)
def info(cluster_name):
    """Print a human-readable status summary for the named cluster."""
    cluster_map = get_clusters(cluster_name, all_metadata=True)
    nodes = cluster_map[cluster_name]
    if not nodes:
        print("No cluster named {} found".format(cluster_name))
        return
    names = [node['Name'] for node in nodes]
    print("Cluster: {}, {} nodes".format(cluster_name, len(names)))
    for idx, name in enumerate(names):
        meta = nodes[idx]
        if meta['State']['Running']:
            print(" {} : {}".format(name, meta['NetworkSettings']['IPAddress']))
        else:
            print(" {} : offline".format(name))
def destroy(cluster_regex):
    """Destroy clusters"""
    for cluster, containers in get_clusters(cluster_regex).items():
        if not containers:
            continue
        log.info('Destroying {} containers...'.format(cluster_regex))
        for container in containers:
            subprocess.call(shlex.split("docker rm -f {}".format(container)),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def associate(frontend_name, cluster_names, with_dse=False):
    """Link one or more clusters to a frontend so it can schedule jobs on them.

    frontend_name - name of an existing frontend cluster.
    cluster_names - names of existing clusters to associate.
    with_dse      - also register the 'dse' product on each cluster.

    Raises ValueError when the frontend or any named cluster is missing.
    """
    try:
        frontend = get_clusters(frontend_name, all_metadata=True)[frontend_name][0]
    except IndexError:
        raise ValueError("No frontend cluster named {} found".format(frontend_name))
    clusters = []
    for c in cluster_names:
        try:
            cluster = get_clusters(c, all_metadata=True)[c][0]
        except IndexError:
            raise ValueError("No cluster named {} found".format(c))
        clusters.append(cluster)
    frontend_ip = frontend['NetworkSettings']['IPAddress']
    # Configure the client credentials on all clusters
    with fab.settings(hosts=frontend_ip):
        frontend_credentials = fab_execute(fab_deploy.get_frontend_credentials).values()[0]
    for cluster in clusters:
        cluster_name = cluster['Config']['Labels']['cluster_name']
        # BUG FIX: previously this looked up node names via the stale loop
        # variable 'c' (the *last* name from the validation loop above),
        # which produced an empty/wrong node list when several clusters
        # were associated in one call.
        nodes = get_clusters(cluster_name)[cluster_name][1:]
        cluster_ip = cluster['NetworkSettings']['IPAddress']
        with fab.settings(hosts=cluster_ip):
            fab_execute(fab_deploy.generate_client_credentials, cluster_name,
                        frontend_credentials['public_key'],
                        frontend_credentials['verify_code'])
            # Get the cluster credentials and jvms list
            cluster_credentials = fab_execute(fab_deploy.get_client_credentials).values()[0]
            jvms = fab_execute(fab_deploy.get_client_jvms).values()[0]
        # Link the cluster to the frontend
        with fab.settings(hosts=frontend_ip):
            fab_execute(fab_deploy.add_cluster_to_frontend, cluster_name, nodes,
                        cluster_credentials['public_key'])
            for jvm in jvms:
                fab_execute(fab_deploy.add_jvm_to_cluster, cluster_name, jvm)
            if with_dse:
                fab_execute(fab_deploy.add_product_to_cluster, cluster_name, 'dse')
        with fab.settings(hosts=cluster_ip, user="root"):
            fab_execute(tasks.setup_client_daemon, frontend['Name'])
            fab_execute(tasks.add_or_update_host_ips, ((frontend['Name'], frontend_ip),))
def enable_dse(cluster_name, dse_url, dse_username, dse_password, dse_source_build_artifactory_url,
               dse_source_build_artifactory_username, dse_source_build_artifactory_password,
               dse_source_build_oauth_token):
    """Enable DSE support on an existing cluster, then restart its services."""
    matches = get_clusters(cluster_name, all_metadata=True)[cluster_name]
    if not matches:
        raise ValueError("No cluster named {} found".format(cluster_name))
    node_ip = matches[0]['NetworkSettings']['IPAddress']
    with fab.settings(hosts=node_ip):
        fab_execute(fab_deploy.enable_dse, dse_url, dse_username, dse_password, dse_source_build_artifactory_url,
                    dse_source_build_artifactory_username, dse_source_build_artifactory_password,
                    dse_source_build_oauth_token)
    with fab.settings(hosts=node_ip, user="root"):
        fab_execute(tasks.restart_all_services)
def __update_node_ip_addresses(cluster_name, static_ips=None):
    """Update node ip addresses

    This is necessary because docker assigns new IP addresses each time a container is restarted

    if static_ips is provided, interpret as a dictionary mapping hosts to ips.
    """
    # Retrieve the current ~/.cstar_perf/cluster_config.json on node 00:
    clusters = get_clusters(cluster_name, all_metadata=True)
    cluster = clusters[cluster_name]
    current_ips = dict([(c['Name'], c['NetworkSettings']['IPAddress']) for c in cluster])
    if static_ips:
        updated_ips = static_ips
    else:
        updated_ips = current_ips
    node0 = cluster[0]['Name']
    with fab.settings(hosts=current_ips[node0]):
        def get_cluster_config():
            cfg = StringIO()
            fab.get("~/.cstar_perf/cluster_config.json", cfg)
            cfg.seek(0)
            return json.load(cfg)
        # fab_execute returns {host: result}; take the single result value:
        cluster_config = fab_execute(get_cluster_config).values()[0]
    # Update cluster_config with the current node IP addresses:
    for host, cfg in cluster_config['hosts'].items():
        cluster_config['hosts'][host]['internal_ip'] = cluster_config['hosts'][host]['external_ip'] = updated_ips[host]
    cluster_config['stress_node'] = updated_ips[node0]
    # Replace the config file onto node 0:
    with fab.settings(hosts=cluster[0]['NetworkSettings']['IPAddress']):
        def put_cluster_config():
            cfg = StringIO()
            json.dump(cluster_config, cfg, indent=2)
            fab.put(cfg, "~/.cstar_perf/cluster_config.json")
        fab_execute(put_cluster_config)
    # Update all /etc/hosts file with latest ips
    hosts = []
    clusters = get_clusters('all', all_metadata=True)
    # NOTE(review): this loop reuses (shadows) the 'cluster_name' parameter;
    # harmless today because the parameter is not used after this point.
    for cluster_name in clusters.keys():
        hosts.extend(get_ips(cluster_name))
    with fab.settings(hosts=[ip for host, ip in hosts], user="root"):
        fab_execute(tasks.add_or_update_host_ips, hosts)
        fab_execute(tasks.restart_all_services)
def start(cluster_name):
    """start cluster"""
    nodes = get_clusters(cluster_name)[cluster_name]
    for node in nodes:
        subprocess.call(shlex.split("docker start {}".format(node)),
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Docker hands out fresh IPs on restart; re-sync the cluster config:
    __update_node_ip_addresses(cluster_name)
def stop(cluster_name):
    """stop cluster"""
    nodes = get_clusters(cluster_name)[cluster_name]
    for node in nodes:
        subprocess.call(shlex.split("docker stop {}".format(node)),
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def list_clusters():
    """List clusters"""
    all_clusters = get_clusters('all', all_metadata=True)
    for name, containers in all_clusters.items():
        node_count = len(containers)
        print("{name}, {num_nodes} node{plural} ({cluster_type})".format(
            name=name,
            num_nodes=node_count,
            plural="s" if node_count > 1 else "",
            cluster_type=containers[0]['Config']['Labels']['cluster_type']))
def ssh(cluster_name, node, user='cstar', ssh_key_path=os.path.join(os.path.expanduser("~"),'.ssh','id_rsa')):
    """Open an interactive SSH session to a cluster node.

    ``ssh_key_path`` is kept for backward compatibility but is unused:
    authentication always uses the tool's own generated key pair.
    """
    clusters = get_clusters(cluster_name, all_metadata=True)
    containers = clusters[cluster_name]
    if len(containers) == 0:
        print("No cluster named {} found".format(cluster_name))
    elif containers[node]['State']['Running'] is False:
        log.error("Node is not running. Try starting the cluster: cstar_docker start {}".format(cluster_name))
    else:
        # Build an argv list instead of a shell=True string so the user
        # name etc. are never shell-interpolated. (Also removed the unused
        # 'node_names' local the old code computed.)
        command = ['ssh',
                   '-o', 'StrictHostKeyChecking=no',
                   '-o', 'UserKnownHostsFile={}'.format(os.devnull),
                   '-o', 'User={}'.format(user),
                   '-i', get_ssh_key_pair()[0],
                   containers[node]['NetworkSettings']['IPAddress']]
        proc = subprocess.Popen(command)
        proc.wait()
def execute_cmd(cmd, args):
    """Dispatch a parsed command line sub-command to its implementation."""
    if cmd == 'launch':
        if not check_cluster_exists(args.name) or args.destroy_existing:
            try:
                launch(args.num_nodes, cluster_name=args.name,
                       destroy_existing=args.destroy_existing,
                       install_tool=not args.no_install,
                       mount_host_src=args.mount, verbose=True,
                       client_double_duty=args.client_double_duty)
            except:
                # Tear down whatever partially started before re-raising:
                destroy(args.name)
                raise
        else:
            log.error('Cannot launch cluster \'{}\' as it already exists.'.format(args.name))
            log.error('You must destroy the existing cluster, or use --destroy-existing '
                      'in your launch command')
            exit(1)
    elif cmd == 'frontend':
        if not check_cluster_exists(args.name) or args.destroy_existing:
            try:
                # A frontend is one node that also runs its own Cassandra:
                launch(1, cluster_name=args.name,
                       destroy_existing=args.destroy_existing,
                       frontend=True, client_double_duty=True,
                       mount_host_src=args.mount, verbose=True)
            except:
                destroy(args.name)
                raise
        else:
            log.error('Cannot launch cluster \'{}\' as it already exists.'.format(args.name))
            log.error('You must destroy the existing cluster, or use --destroy-existing '
                      'in your launch command')
            exit(1)
    elif cmd == 'associate':
        associate(args.frontend, args.clusters, with_dse=args.with_dse)
    elif cmd == 'start':
        start(cluster_name=args.name)
    elif cmd == 'stop':
        stop(cluster_name=args.name)
    elif cmd == 'restart':
        # restart == stop followed by start (start re-syncs IP addresses):
        stop(cluster_name=args.name)
        start(cluster_name=args.name)
    elif cmd == 'destroy':
        destroy(args.cluster_regex)
    elif cmd == 'list':
        list_clusters()
    elif cmd == 'info':
        info(args.cluster_name)
    elif cmd == 'ssh':
        ssh(args.cluster_name, args.node, user=args.login_name)
    elif cmd == 'build':
        build_docker_image(force=args.force)
    elif cmd == 'enable_dse':
        enable_dse(args.frontend, args.dse_repo_url, args.dse_repo_username, args.dse_repo_password,
                   args.dse_source_build_artifactory_url, args.dse_source_build_artifactory_username,
                   args.dse_source_build_artifactory_password, args.dse_source_build_oauth_token)
    else:
        raise AssertionError('Unknown command: {cmd}'.format(cmd=cmd))
def main():
    """Build the argparse CLI, parse arguments and run the sub-command.

    NOTE(review): the local names below (launch, frontend, associate, ...)
    shadow the module-level functions of the same names; here they are
    only the sub-parser objects and the functions are not needed until
    execute_cmd() runs.
    """
    parser = argparse.ArgumentParser(description='cstar_docker.py - '
                                     'Interact with cstar_perf docker clusters',
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser_subparsers = parser.add_subparsers(dest='command')
    launch = parser_subparsers.add_parser('launch', description="Launch a cluster with the given name and number of nodes")
    launch.add_argument('name', help='The name of the cluster')
    launch.add_argument('num_nodes', type=int, help='The number of Cassandra nodes to launch')
    launch.add_argument('-c', '--client-double-duty', action="store_true", help='Use node 00 as another Cassandra node, in addition to running the client')
    launch.add_argument(
        '--no-install', help='Don\'t install cstar_perf.tool', action='store_true')
    launch.add_argument(
        '-m', '--mount', help='Mount the host system\'s cstar_perf checkout rather than install from github', action='store_true')
    launch.add_argument(
        '--destroy-existing', help='Destroy any existing cluster with the same name before launching', action="store_true")
    frontend = parser_subparsers.add_parser('frontend', description="Launch a single node frontend instance")
    frontend.add_argument('name', help='The name of the frontend node')
    frontend.add_argument(
        '-m', '--mount', help='Mount the host system\'s cstar_perf checkout rather than install from github', action='store_true')
    frontend.add_argument(
        '--destroy-existing', help='Destroy any existing cluster with the same name before launching', action="store_true")
    associate = parser_subparsers.add_parser('associate', description="Hook up one or more clusters to a cluster")
    associate.add_argument('frontend', help='The name of the frontend cluster')
    associate.add_argument('clusters', help='The names of the clusters to hook up to the frontend', nargs='+')
    associate.add_argument('--with-dse', help='Enable DSE product for this cluster', action='store_true', default=False)
    destroy = parser_subparsers.add_parser('destroy', description='Destroy clusters - specify a regex of cluster names to destroy, or specify \'all\' to destroy all clusters created')
    destroy.add_argument('cluster_regex', help='The regex of the names of clusters to destroy')
    list_clusters = parser_subparsers.add_parser('list', description='List clusters')
    info = parser_subparsers.add_parser('info', description='Print cluster information')
    info.add_argument('cluster_name', help='The name of the cluster')
    ssh = parser_subparsers.add_parser('ssh', description='SSH to cluster node')
    ssh.add_argument('cluster_name', help='The name of the cluster')
    ssh.add_argument('node', help='The node number', type=int, nargs='?', default=0)
    ssh.add_argument('-l', '--login_name', help='User to login as (default: cstar)', default='cstar')
    build = parser_subparsers.add_parser('build', description='Build the Docker image')
    build.add_argument(
        '-f', '--force', help='Force building the image by removing any existing image first', action='store_true')
    start = parser_subparsers.add_parser('start', description='Start an existing cluster')
    start.add_argument('name', help='The name of the cluster to start')
    stop = parser_subparsers.add_parser('stop', description='Stop an existing cluster')
    stop.add_argument('name', help='The name of the cluster to stop')
    restart = parser_subparsers.add_parser('restart', description='Restart an existing cluster')
    restart.add_argument('name', help='The name of the cluster to restart')
    enable_dse = parser_subparsers.add_parser('enable_dse', description="Enable DSE support")
    enable_dse.add_argument('frontend', help='The name of the frontend node')
    enable_dse.add_argument('dse_repo_url', help='DSE Tarball Repo url')
    enable_dse.add_argument('dse_repo_username', nargs='?', default=None, help='DSE Tarball Repo username')
    enable_dse.add_argument('dse_repo_password', nargs='?', default=None, help='DSE Tarball Repo password')
    enable_dse.add_argument('dse_source_build_artifactory_url', nargs='?', default=None, help='DSE Artifactory URL')
    enable_dse.add_argument('dse_source_build_artifactory_username', nargs='?', default=None, help='DSE Artifactory username')
    enable_dse.add_argument('dse_source_build_artifactory_password', nargs='?', default=None, help='DSE Artifactory password')
    enable_dse.add_argument('dse_source_build_oauth_token', nargs='?', default=None, help='DSE OAuth token for accessing GitHub Repo')
    try:
        args = parser.parse_args()
    finally:
        # Print verbose help if they didn't give any command:
        if len(sys.argv) == 1:
            parser.print_help()
    check_docker_version()
    execute_cmd(args.command, args)
    # Remember the tool's key for any subsequent fabric operations:
    fab.env.key_filename = get_ssh_key_pair()[0]
# Standard script entry point guard.
if __name__ == "__main__":
    main()
# (removed a stray '|' file-concatenation artifact)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
from nova.compute import flavors
from nova.db import base
from nova import exception
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.objects import instance_info_cache as info_cache_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import policy
from nova import utils
LOG = logging.getLogger(__name__)
def refresh_cache(f):
    """
    Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    argspec = inspect.getargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                # offset by 2 because 'self' and 'context' are consumed
                # explicitly by this wrapper and are not in *args
                instance = args[argspec.args.index('instance') - 2]
        except (ValueError, IndexError):
            # ValueError: 'instance' not in the wrapped signature at all;
            # IndexError: it is in the signature but was passed neither
            # positionally nor by keyword (BUG FIX: this case previously
            # escaped as a raw IndexError instead of this message).
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)
        update_instance_cache_with_nw_info(self, context, instance,
                                           nw_info=res)
        # return the original function's return value
        return res
    return wrapper
def update_instance_cache_with_nw_info(api, context, instance, nw_info=None,
                                       update_cells=True):
    """Persist nw_info into the instance's info cache.

    When nw_info is missing or is not a NetworkInfo model, it is
    re-fetched via the network API. Failures are logged and re-raised.
    """
    try:
        LOG.debug(_('Updating cache with info: %s'), nw_info)
        # Anything other than a proper NetworkInfo model is discarded and
        # re-fetched below:
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = api._get_instance_nw_info(context, instance)
        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance. We don't need to retrieve the object
        # from the DB first.
        ic = info_cache_obj.InstanceInfoCache.new(context,
                                                  instance['uuid'])
        ic.network_info = nw_info
        ic.save(update_cells=update_cells)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Failed storing info cache'), instance=instance)
def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution."""
    @functools.wraps(func)
    def inner(self, context, *args, **kwargs):
        # The policy action is simply the wrapped method's name.
        check_policy(context, func.__name__)
        return func(self, context, *args, **kwargs)
    return inner
def check_policy(context, action):
    """Enforce the 'network:<action>' policy for the requesting context."""
    credentials = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    policy.enforce(context, 'network:%s' % action, credentials)
class API(base.Base):
"""API for doing networking via the nova-network network manager.
This is a pluggable module - other implementations do networking via
other services (such as Neutron).
"""
_sentinel = object()
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
helper = utils.ExceptionHelper
# NOTE(vish): this local version of floating_manager has to convert
# ClientExceptions back since they aren't going over rpc.
self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
"""Get all the networks.
If it is an admin user, api will return all the networks,
if it is a normal user, api will only return the networks which
belong to the user's project.
"""
try:
return self.db.network_get_all(context, project_only=True)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
return self.db.network_get_by_uuid(context.elevated(), network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
return self.network_rpcapi.create_networks(context, **kwargs)
@wrap_check_policy
def delete(self, context, network_uuid):
return self.network_rpcapi.delete_network(context, network_uuid, None)
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
self.db.network_disassociate(context, network['id'])
@wrap_check_policy
def get_fixed_ip(self, context, id):
return self.db.fixed_ip_get(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
return self.db.fixed_ip_get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
return self.db.floating_ip_get(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
return self.db.floating_ip_get_pools(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
return self.db.floating_ip_get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
return self.db.floating_ip_get_all_by_project(context,
context.project_id)
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip['instance_uuid']
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
    """Return the virtual interfaces of *instance*.

    Each vif attached to a network is annotated in place with the
    network's uuid under the 'net_uuid' key.
    """
    vifs = self.db.virtual_interface_get_by_instance(context,
                                                     instance['uuid'])
    for vif in vifs:
        if vif.get('network_id') is not None:
            # project_only="allow_none" -- presumably so networks without
            # an owning project can still be resolved; confirm db semantics.
            network = self.db.network_get(context, vif['network_id'],
                                          project_only="allow_none")
            vif['net_uuid'] = network['uuid']
    return vifs

@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
    """Return the virtual interface with the given MAC address.

    As in get_vifs_by_instance, the vif is annotated with 'net_uuid'
    when it is attached to a network.
    """
    vif = self.db.virtual_interface_get_by_address(context,
                                                   mac_address)
    if vif.get('network_id') is not None:
        network = self.db.network_get(context, vif['network_id'],
                                      project_only="allow_none")
        vif['net_uuid'] = network['uuid']
    return vif
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
    """Adds (allocates) a floating ip to a project from a pool."""
    # Third positional argument (False) -- presumably affect_auto_assigned;
    # confirm against the floating manager's signature.
    return self.floating_manager.allocate_floating_ip(context,
                                                      context.project_id, False, pool)

@wrap_check_policy
def release_floating_ip(self, context, address,
                        affect_auto_assigned=False):
    """Removes (deallocates) a floating ip with address from a project."""
    return self.floating_manager.deallocate_floating_ip(context, address,
                                                        affect_auto_assigned)
@wrap_check_policy
@refresh_cache
def associate_floating_ip(self, context, instance,
                          floating_address, fixed_address,
                          affect_auto_assigned=False):
    """Associates a floating ip with a fixed ip.

    Ensures floating ip is allocated to the project in context.
    Does not verify ownership of the fixed ip. Caller is assumed to have
    checked that the instance is properly owned.
    """
    # The manager returns the uuid of the instance that previously held
    # this floating ip, if it was re-assigned from somewhere else.
    orig_instance_uuid = self.floating_manager.associate_floating_ip(
        context, floating_address, fixed_address, affect_auto_assigned)

    if orig_instance_uuid:
        msg_dict = dict(address=floating_address,
                        instance_id=orig_instance_uuid)
        LOG.info(_('re-assign floating IP %(address)s from '
                   'instance %(instance_id)s') % msg_dict)
        orig_instance = self.db.instance_get_by_uuid(context,
                                                     orig_instance_uuid)

        # purge cached nw info for the original instance
        update_instance_cache_with_nw_info(self, context, orig_instance)
@wrap_check_policy
@refresh_cache
def disassociate_floating_ip(self, context, instance, address,
                             affect_auto_assigned=False):
    """Disassociates a floating ip from fixed ip it is associated with."""
    # @refresh_cache re-fetches the instance's network info afterwards.
    return self.floating_manager.disassociate_floating_ip(context, address,
                                                          affect_auto_assigned)
@wrap_check_policy
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
                          requested_networks, macs=None,
                          conductor_api=None, security_groups=None,
                          dhcp_options=None):
    """Allocates all network structures for an instance.

    :param context: The request context.
    :param instance: An Instance dict.
    :param vpn: A boolean, if True, indicate a vpn to access the instance.
    :param requested_networks: A dictionary of requested_networks,
        Optional value containing network_id, fixed_ip, and port_id.
    :param macs: None or a set of MAC addresses that the instance
        should use. macs is supplied by the hypervisor driver (contrast
        with requested_networks which is user supplied).
    :param conductor_api: The conductor api.
    :param security_groups: None or security groups to allocate for
        instance.
    :param dhcp_options: None or a set of key/value pairs that should
        determine the DHCP BOOTP response, eg. for PXE booting an instance
        configured with the baremetal hypervisor. It is expected that these
        are already formatted for the neutron v2 api.
        See nova/virt/driver.py:dhcp_options_for_instance for an example.
    :returns: network info as from get_instance_nw_info() below
    """
    # NOTE(vish): We can't do the floating ip allocation here because
    #             this is called from compute.manager which shouldn't
    #             have db access so we do it on the other side of the
    #             rpc.
    instance_type = flavors.extract_flavor(instance)
    nw_info = self.network_rpcapi.allocate_for_instance(
        context,
        vpn=vpn,
        requested_networks=requested_networks,
        instance_id=instance['uuid'],
        project_id=instance['project_id'],
        host=instance['host'],
        rxtx_factor=instance_type['rxtx_factor'],
        macs=macs,
        dhcp_options=dhcp_options)

    return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def deallocate_for_instance(self, context, instance,
                            requested_networks=None):
    """Deallocates all network structures related to instance."""
    # NOTE(vish): We can't do the floating ip deallocation here because
    #             this is called from compute.manager which shouldn't
    #             have db access so we do it on the other side of the
    #             rpc.
    self.network_rpcapi.deallocate_for_instance(
        context,
        instance_id=instance['uuid'],
        project_id=instance['project_id'],
        host=instance['host'],
        requested_networks=requested_networks)
# NOTE(danms): Here for neutron compatibility
def allocate_port_for_instance(self, context, instance, port_id,
                               network_id=None, requested_ip=None,
                               conductor_api=None):
    """Port-level allocation is a neutron concept; not supported here."""
    raise NotImplementedError()

# NOTE(danms): Here for neutron compatibility
def deallocate_port_for_instance(self, context, instance, port_id,
                                 conductor_api=None):
    """Port-level deallocation is a neutron concept; not supported here."""
    raise NotImplementedError()

# NOTE(danms): Here for neutron compatibility
def list_ports(self, *args, **kwargs):
    """Ports are a neutron concept; not supported here."""
    raise NotImplementedError()

# NOTE(danms): Here for neutron compatibility
def show_port(self, *args, **kwargs):
    """Ports are a neutron concept; not supported here."""
    raise NotImplementedError()
@wrap_check_policy
@refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id,
                             conductor_api=None):
    """Adds a fixed ip to instance from specified network."""
    # rxtx_factor comes from the instance's flavor and is forwarded so the
    # manager can configure bandwidth accordingly.
    instance_type = flavors.extract_flavor(instance)
    args = {'instance_id': instance['uuid'],
            'rxtx_factor': instance_type['rxtx_factor'],
            'host': instance['host'],
            'network_id': network_id}
    self.network_rpcapi.add_fixed_ip_to_instance(context, **args)

@wrap_check_policy
@refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address,
                                  conductor_api=None):
    """Removes a fixed ip from instance from specified network."""
    instance_type = flavors.extract_flavor(instance)
    args = {'instance_id': instance['uuid'],
            'rxtx_factor': instance_type['rxtx_factor'],
            'host': instance['host'],
            'address': address}
    self.network_rpcapi.remove_fixed_ip_from_instance(context, **args)
@wrap_check_policy
def add_network_to_project(self, context, project_id, network_uuid=None):
    """Force adds another network to a project."""
    self.network_rpcapi.add_network_to_project(context, project_id,
                                               network_uuid)

@wrap_check_policy
def associate(self, context, network_uuid, host=_sentinel,
              project=_sentinel):
    """Associate or disassociate host or project to network.

    The class-level _sentinel default distinguishes "argument not
    passed" from an explicit None: passing None disassociates, passing
    a value associates, omitting the argument leaves it untouched.
    """
    network_id = self.get(context, network_uuid)['id']
    if host is not API._sentinel:
        if host is None:
            self.db.network_disassociate(context, network_id,
                                         disassociate_host=True,
                                         disassociate_project=False)
        else:
            self.db.network_set_host(context, network_id, host)
    if project is not API._sentinel:
        if project is None:
            self.db.network_disassociate(context, network_id,
                                         disassociate_host=False,
                                         disassociate_project=True)
        else:
            self.db.network_associate(context, project, network_id, True)
@wrap_check_policy
def get_instance_nw_info(self, context, instance):
    """Returns all network info related to an instance."""
    result = self._get_instance_nw_info(context, instance)
    # NOTE(comstud): Don't update API cell with new info_cache every
    # time we pull network info for an instance.  The periodic healing
    # of info_cache causes too many cells messages.  Healing the API
    # will happen separately.
    update_instance_cache_with_nw_info(self, context, instance,
                                       result, update_cells=False)
    return result

def _get_instance_nw_info(self, context, instance):
    """Returns all network info related to an instance.

    Internal variant of get_instance_nw_info without policy check or
    cache update; fetches over rpc and hydrates a NetworkInfo model.
    """
    instance_type = flavors.extract_flavor(instance)
    args = {'instance_id': instance['uuid'],
            'rxtx_factor': instance_type['rxtx_factor'],
            'host': instance['host'],
            'project_id': instance['project_id']}
    nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)

    return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def validate_networks(self, context, requested_networks):
    """validate the networks passed at the time of creating
    the server
    """
    # Nothing to validate when no specific networks were requested.
    if not requested_networks:
        return
    return self.network_rpcapi.validate_networks(context,
                                                 requested_networks)

@wrap_check_policy
def get_instance_uuids_by_ip_filter(self, context, filters):
    """Returns a list of dicts in the form of
    {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
    """
    return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
                                                               filters)

@wrap_check_policy
def get_dns_domains(self, context):
    """Returns a list of available dns domains.

    These can be used to create DNS entries for floating ips.
    """
    return self.network_rpcapi.get_dns_domains(context)
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
    """Create specified DNS entry for address."""
    args = {'address': address,
            'name': name,
            'dns_type': dns_type,
            'domain': domain}
    return self.network_rpcapi.add_dns_entry(context, **args)

@wrap_check_policy
def modify_dns_entry(self, context, name, address, domain):
    """Modify the specified DNS entry for address."""
    args = {'address': address,
            'name': name,
            'domain': domain}
    return self.network_rpcapi.modify_dns_entry(context, **args)

@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
    """Delete the specified dns entry."""
    args = {'name': name, 'domain': domain}
    return self.network_rpcapi.delete_dns_entry(context, **args)

@wrap_check_policy
def delete_dns_domain(self, context, domain):
    """Delete the specified dns domain."""
    return self.network_rpcapi.delete_dns_domain(context, domain=domain)

@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
    """Get entries for address and domain."""
    args = {'address': address, 'domain': domain}
    return self.network_rpcapi.get_dns_entries_by_address(context, **args)

@wrap_check_policy
def get_dns_entries_by_name(self, context, name, domain):
    """Get entries for name and domain."""
    args = {'name': name, 'domain': domain}
    return self.network_rpcapi.get_dns_entries_by_name(context, **args)

@wrap_check_policy
def create_private_dns_domain(self, context, domain, availability_zone):
    """Create a private DNS domain with nova availability zone."""
    args = {'domain': domain, 'av_zone': availability_zone}
    return self.network_rpcapi.create_private_dns_domain(context, **args)

@wrap_check_policy
def create_public_dns_domain(self, context, domain, project=None):
    """Create a public DNS domain with optional nova project."""
    args = {'domain': domain, 'project': project}
    return self.network_rpcapi.create_public_dns_domain(context, **args)
@wrap_check_policy
def setup_networks_on_host(self, context, instance, host=None,
                           teardown=False):
    """Setup or teardown the network structures on hosts related to
    instance.
    """
    host = host or instance['host']
    # NOTE(tr3buchet): host is passed in cases where we need to setup
    # or teardown the networks on a host which has been migrated to/from
    # and instance['host'] is not yet or is no longer equal to
    # NOTE(review): uses instance['id'] (integer db id) where the other
    # rpc calls in this class pass instance['uuid'] -- confirm the rpc
    # endpoint really expects the db id before changing anything here.
    args = {'instance_id': instance['id'],
            'host': host,
            'teardown': teardown}

    self.network_rpcapi.setup_networks_on_host(context, **args)

def _is_multi_host(self, context, instance):
    """Return the multi_host flag of the network backing the instance.

    Returns False when the instance has no fixed ips at all.
    """
    try:
        fixed_ips = self.db.fixed_ip_get_by_instance(context,
                                                     instance['uuid'])
    except exception.FixedIpNotFoundForInstance:
        return False
    # Assumes the db call raises rather than returning an empty list;
    # otherwise fixed_ips[0] would raise IndexError -- TODO confirm.
    network = self.db.network_get(context, fixed_ips[0]['network_id'],
                                  project_only='allow_none')
    return network['multi_host']
def _get_floating_ip_addresses(self, context, instance):
    """Return every floating address associated with the instance."""
    return self.db.instance_floating_address_get_all(
        context, instance['uuid'])
def _build_migrate_instance_args(self, context, instance, migration, host):
    """Build the rpc kwargs shared by migrate_instance_start/finish.

    :param host: the compute host to target when the network is
        multi-host (source for start, dest for finish); only included
        in the args in the multi-host case, matching the original
        per-method behavior.
    """
    instance_type = flavors.extract_flavor(instance)
    args = dict(
        instance_uuid=instance['uuid'],
        rxtx_factor=instance_type['rxtx_factor'],
        project_id=instance['project_id'],
        source_compute=migration['source_compute'],
        dest_compute=migration['dest_compute'],
        floating_addresses=None,
    )
    if self._is_multi_host(context, instance):
        args['floating_addresses'] = \
            self._get_floating_ip_addresses(context, instance)
        args['host'] = host
    return args

@wrap_check_policy
def migrate_instance_start(self, context, instance, migration):
    """Start to migrate the network of an instance."""
    # Duplicated arg-building shared with migrate_instance_finish lives
    # in _build_migrate_instance_args; start targets the source host.
    args = self._build_migrate_instance_args(
        context, instance, migration, migration['source_compute'])
    self.network_rpcapi.migrate_instance_start(context, **args)

@wrap_check_policy
def migrate_instance_finish(self, context, instance, migration):
    """Finish migrating the network of an instance."""
    # Same args as migrate_instance_start, but targeting the dest host.
    args = self._build_migrate_instance_args(
        context, instance, migration, migration['dest_compute'])
    self.network_rpcapi.migrate_instance_finish(context, **args)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._storage_insight_configs_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_workspace_request
# Generic payload type for the optional ``cls`` response-transform callback
# accepted by every operation in this module.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageInsightConfigsOperations:
    """StorageInsightConfigsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.loganalytics.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this class is AutoRest-generated (see file header); manual edits
    # will be lost when the code is regenerated.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        storage_insight_name: str,
        parameters: "_models.StorageInsight",
        **kwargs: Any
    ) -> "_models.StorageInsight":
        """Create or update a storage insight.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param storage_insight_name: Name of the storageInsightsConfigs resource.
        :type storage_insight_name: str
        :param parameters: The parameters required to create or update a storage insight.
        :type parameters: ~azure.mgmt.loganalytics.models.StorageInsight
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageInsight, or the result of cls(response)
        :rtype: ~azure.mgmt.loganalytics.models.StorageInsight
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageInsight"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'StorageInsight')

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            storage_insight_name=storage_insight_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('StorageInsight', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('StorageInsight', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        storage_insight_name: str,
        **kwargs: Any
    ) -> "_models.StorageInsight":
        """Gets a storage insight instance.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param storage_insight_name: Name of the storageInsightsConfigs resource.
        :type storage_insight_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageInsight, or the result of cls(response)
        :rtype: ~azure.mgmt.loganalytics.models.StorageInsight
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageInsight"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            storage_insight_name=storage_insight_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('StorageInsight', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'}  # type: ignore

    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        workspace_name: str,
        storage_insight_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes a storageInsightsConfigs resource.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param storage_insight_name: Name of the storageInsightsConfigs resource.
        :type storage_insight_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            storage_insight_name=storage_insight_name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 and 204 are both accepted success codes for delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'}  # type: ignore

    @distributed_trace
    def list_by_workspace(
        self,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.StorageInsightListResult"]:
        """Lists the storage insight instances within a workspace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either StorageInsightListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.StorageInsightListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageInsightListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_workspace_request(
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_workspace.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # next_link is the follow-up page URL returned by the service;
                # rebuild the request against it and force the method to GET.
                request = build_list_by_workspace_request(
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("StorageInsightListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # Returns (link to the next page or None, items of this page).
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs'}  # type: ignore
| |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Model tests."""
from __future__ import absolute_import, print_function
import uuid
import pytest
from mock import patch
from sqlalchemy.exc import SQLAlchemyError
from invenio_pidstore.errors import PIDAlreadyExists, PIDDoesNotExistError, \
PIDInvalidAction, PIDObjectAlreadyAssigned
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, Redirect
@patch('invenio_pidstore.models.logger')
def test_pid_creation(logger, app, db):
    """Test pid creation."""
    with app.app_context():
        assert PersistentIdentifier.query.count() == 0
        # Minimal creation: defaults are NEW status, no provider, no object.
        pid = PersistentIdentifier.create('doi', '10.1234/foo')
        assert PersistentIdentifier.query.count() == 1
        assert pid.pid_type == 'doi'
        assert pid.pid_value == '10.1234/foo'
        assert pid.pid_provider is None
        assert pid.status == PIDStatus.NEW
        assert pid.object_type is None
        assert pid.object_uuid is None
        assert logger.info.called

        # Creation with explicit status and an assigned object.
        rec_uuid = uuid.uuid4()
        pid = PersistentIdentifier.create(
            'rec', '2', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid=rec_uuid)
        assert PersistentIdentifier.query.count() == 2
        assert pid.pid_type == 'rec'
        assert pid.pid_value == '2'
        assert pid.pid_provider is None
        assert pid.status == PIDStatus.REGISTERED
        assert pid.object_type == 'rec'
        assert pid.object_uuid == rec_uuid

        # Can't duplicate existing persistent identifier
        assert not logger.exception.called
        pytest.raises(
            PIDAlreadyExists, PersistentIdentifier.create, 'rec', '2')
        assert logger.exception.called

        # Database errors are logged and re-raised.
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, PersistentIdentifier.create,
                          'rec', '2')
            assert logger.exception.call_args[0][0].startswith(
                "Failed to create")


def test_alembic(app, db):
    """Test alembic recipes."""
    ext = app.extensions['invenio-db']

    if db.engine.name == 'sqlite':
        raise pytest.skip('Upgrades are not supported on SQLite.')

    # Upgrade/downgrade round-trips must leave the schema matching the models.
    assert not ext.alembic.compare_metadata()
    db.drop_all()
    ext.alembic.upgrade()

    assert not ext.alembic.compare_metadata()
    ext.alembic.stamp()
    ext.alembic.downgrade(target='96e796392533')
    ext.alembic.upgrade()

    assert not ext.alembic.compare_metadata()
def test_pidstatus_as():
    """Test PID status."""
    assert PIDStatus.NEW.title == 'New'
    assert PIDStatus.RESERVED.title == 'Reserved'
    # Status members compare equal to their one-letter codes; NEW is 'N'.
    assert next(iter(PIDStatus)) == 'N'


def test_pid_get(app, db):
    """Test pid retrieval."""
    with app.app_context():
        PersistentIdentifier.create('doi', '10.1234/foo')
        assert PersistentIdentifier.get('doi', '10.1234/foo')
        pytest.raises(
            PIDDoesNotExistError,
            PersistentIdentifier.get,
            'doi', '10.1234/bar'
        )

        # PID with provider
        doi = '10.1234/a'
        PersistentIdentifier.create('doi', doi, pid_provider='dcite')
        # Lookup works both with and without the provider, but a wrong
        # provider does not match.
        assert PersistentIdentifier.get('doi', doi)
        assert PersistentIdentifier.get(
            'doi', doi, pid_provider='dcite')
        pytest.raises(
            PIDDoesNotExistError,
            PersistentIdentifier.get,
            'doi', doi, pid_provider='cref'
        )

        # Retrieve by object
        myuuid = uuid.uuid4()
        doi = '10.1234/b'
        PersistentIdentifier.create(
            'doi', doi, object_type='rec', object_uuid=myuuid)
        pid = PersistentIdentifier.get_by_object('doi', 'rec', myuuid)
        assert pid.pid_value == doi
        pytest.raises(
            PIDDoesNotExistError,
            PersistentIdentifier.get_by_object,
            'doi', 'rec', uuid.uuid4()
        )
@patch('invenio_pidstore.models.logger')
def test_pid_assign(logger, app, db):
    """Test pid object assignment."""
    with app.app_context():
        # No assigned object
        pid = PersistentIdentifier.create('doi', '10.1234/foo')
        assert not pid.has_object()
        assert pid.get_assigned_object() is None
        assert pid.get_assigned_object('rec') is None

        # Assign object
        rec_uuid = uuid.uuid4()
        pid.assign('rec', rec_uuid)
        assert logger.info.call_args[0][0].startswith("Assigned")
        assert 'pid' in logger.info.call_args[1]['extra']
        assert pid.has_object()
        # get_assigned_object filters by object type when one is given.
        assert pid.get_assigned_object() == rec_uuid
        assert pid.get_assigned_object('rec') == rec_uuid
        assert pid.get_assigned_object('oth') is None

        # Re-assigning the same object doesn't raise.
        pid.assign('rec', rec_uuid)

        # Assign without overwrite (uuid as str and uuid)
        new_uuid = uuid.uuid4()
        pytest.raises(PIDObjectAlreadyAssigned, pid.assign, 'rec', new_uuid)
        pytest.raises(
            PIDObjectAlreadyAssigned, pid.assign, 'rec', str(new_uuid))

        # Assign with overwrite
        pid.assign('rec', str(new_uuid), overwrite=True)
        assert pid.has_object()
        assert pid.get_assigned_object() == new_uuid
        assert pid.get_assigned_object('rec') == new_uuid
        assert pid.get_assigned_object('oth') is None

        # Assign with SQLError
        pid = PersistentIdentifier.create('recid', '101')
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, pid.assign, 'rec', uuid.uuid4())


@patch('invenio_pidstore.models.logger')
def test_pid_unassign_noobject(logger, app, db):
    """Test unassign."""
    with app.app_context():
        # Unassigning when nothing is assigned succeeds.
        pid = PersistentIdentifier.create('recid', '101')
        assert pid.unassign()

        pid.assign('rec', uuid.uuid4())
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, pid.unassign)
            assert logger.exception.call_args[0][0].startswith(
                "Failed to unassign")
            assert 'pid' in logger.exception.call_args[1]['extra']


def test_pid_assign_deleted(app, db):
    """Test pid object assignment."""
    with app.app_context():
        # Assignment to a deleted pid is an invalid action.
        pid = PersistentIdentifier.create(
            'doi', '10.1234/foo', status=PIDStatus.DELETED)
        pytest.raises(PIDInvalidAction, pid.assign, 'rec', uuid.uuid4())
@patch('invenio_pidstore.models.logger')
def test_reserve(logger, app, db):
    """Test pid reserve."""
    with app.app_context():
        i = 1
        # Reserving is allowed from NEW and RESERVED ...
        for s in [PIDStatus.NEW, PIDStatus.RESERVED]:
            pid = PersistentIdentifier.create('rec', str(i), status=s)
            i += 1
            assert pid.reserve()
            assert logger.info.call_args[0][0].startswith(
                "Reserved PID")
        # ... but not from REGISTERED, DELETED or REDIRECTED.
        for s in [PIDStatus.REGISTERED, PIDStatus.DELETED,
                  PIDStatus.REDIRECTED]:
            pid = PersistentIdentifier.create('rec', str(i), status=s)
            i += 1
            pytest.raises(PIDInvalidAction, pid.reserve)

        # Test logging of bad errors.
        pid = PersistentIdentifier.create('rec', str(i))
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, pid.reserve)
            assert logger.exception.call_args[0][0].startswith(
                "Failed to reserve")
            assert 'pid' in logger.exception.call_args[1]['extra']


@patch('invenio_pidstore.models.logger')
def test_register(logger, app, db):
    """Test pid register."""
    with app.app_context():
        i = 1
        # Registration is allowed from NEW and RESERVED ...
        for s in [PIDStatus.NEW, PIDStatus.RESERVED]:
            pid = PersistentIdentifier.create('rec', str(i), status=s)
            i += 1
            assert pid.register()
            assert logger.info.call_args[0][0].startswith(
                "Registered PID")
        # ... but not from REGISTERED, DELETED or REDIRECTED.
        for s in [PIDStatus.REGISTERED, PIDStatus.DELETED,
                  PIDStatus.REDIRECTED]:
            pid = PersistentIdentifier.create('rec', str(i), status=s)
            i += 1
            pytest.raises(PIDInvalidAction, pid.register)

        # Test logging of bad errors.
        pid = PersistentIdentifier.create('rec', str(i),
                                          status=PIDStatus.RESERVED)
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, pid.register)
            assert logger.exception.call_args[0][0].startswith(
                "Failed to register")
            assert 'pid' in logger.exception.call_args[1]['extra']
@patch('invenio_pidstore.models.logger')
def test_delete(logger, app, db):
"""Test pid delete."""
with app.app_context():
i = 1
for s in [PIDStatus.RESERVED, PIDStatus.RESERVED,
PIDStatus.REDIRECTED, PIDStatus.DELETED]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
assert pid.delete()
assert logger.info.call_args[0][0] == "Deleted PID."
# New persistent identifiers are removed completely
count = PersistentIdentifier.query.count()
pid = PersistentIdentifier.create('rec', str(i), status=PIDStatus.NEW)
db.session.commit()
assert PersistentIdentifier.query.count() == count + 1
pid.delete()
assert PersistentIdentifier.query.count() == count
assert logger.info.call_args[0][0] == "Deleted PID (removed)."
pid = PersistentIdentifier.create('rec', str(i+1))
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.delete)
assert logger.exception.call_args[0][0].startswith(
"Failed to delete")
assert 'pid' in logger.exception.call_args[1]['extra']
@patch('invenio_pidstore.models.logger')
def test_redirect(logger, app, db):
    """Test redirection."""
    with app.app_context():
        # Two registered target PIDs (different pid types) to redirect to.
        pid1 = PersistentIdentifier.create(
            'rec', '1', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid=uuid.uuid4())
        pid2 = PersistentIdentifier.create(
            'doi', '2', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid=uuid.uuid4())
        # Can't redirect these statuses
        i = 10
        for s in [PIDStatus.NEW, PIDStatus.RESERVED, PIDStatus.DELETED, ]:
            pid = PersistentIdentifier.create('rec', str(i), status=s)
            i += 1
            pytest.raises(PIDInvalidAction, pid.redirect, pid1)
        pid = PersistentIdentifier.create(
            'rec', str(i), status=PIDStatus.REGISTERED)
        # Can't redirect to non-exsting pid.
        pytest.raises(PIDDoesNotExistError, pid.redirect,
                      PersistentIdentifier())
        pid.redirect(pid1)
        assert logger.info.call_args[0][0].startswith("Redirected")
        assert 'pid' in logger.info.call_args[1]['extra']
        # Redirection flips the status, drops the object type and keeps a
        # non-null object uuid.
        assert pid.status == PIDStatus.REDIRECTED
        assert pid.object_type is None
        assert pid.object_uuid is not None
        # get_redirect() resolves to the target PID.
        new_pid = pid.get_redirect()
        assert new_pid.pid_type == 'rec'
        assert new_pid.pid_value == '1'
        # You can redirect an already redirected pid
        pid.redirect(pid2)
        new_pid = pid.get_redirect()
        assert new_pid.pid_type == 'doi'
        assert new_pid.pid_value == '2'
        # Assign with SQLError
        # The database error must be re-raised and logged with the pid
        # attached in the logging 'extra' dict.
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, pid.redirect, '1')
            assert logger.exception.call_args[0][0].startswith(
                "Failed to redirect")
            assert 'pid' in logger.exception.call_args[1]['extra']
def test_redirect_cleanup(app, db):
    """Test proper clean up from redirects."""
    with app.app_context():
        pid1 = PersistentIdentifier.create(
            'recid', '1', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid=uuid.uuid4())
        pid2 = PersistentIdentifier.create(
            'recid', '2', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid=uuid.uuid4())
        pid3 = PersistentIdentifier.create(
            'recid', '3', status=PIDStatus.REGISTERED)
        db.session.commit()
        assert Redirect.query.count() == 0
        # Re-pointing an existing redirect reuses the redirect row instead
        # of creating a second one.
        pid3.redirect(pid1)
        assert Redirect.query.count() == 1
        pid3.redirect(pid2)
        assert Redirect.query.count() == 1
        # A redirected pid counts as having an object assigned, so a fresh
        # assignment is refused.
        pytest.raises(
            PIDObjectAlreadyAssigned, pid3.assign, 'rec', uuid.uuid4())
        # Unassigning must remove the underlying redirect row as well.
        pid3.unassign()
        assert Redirect.query.count() == 0
@patch('invenio_pidstore.models.logger')
def test_sync_status(logger, app, db):
    """Test sync status."""
    with app.app_context():
        pid = PersistentIdentifier.create(
            'rec', '1', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid=uuid.uuid4())
        # reserve() is not allowed from REGISTERED ...
        pytest.raises(PIDInvalidAction, pid.reserve)
        # ... but sync_status can force the status back; a real status
        # change emits one INFO log line ...
        calls = logger.info.call_count
        assert pid.sync_status(PIDStatus.NEW)
        assert logger.info.call_count == calls + 1
        assert pid.reserve()
        calls = logger.info.call_count
        # ... while syncing to the current status is a silent no-op.
        assert pid.sync_status(PIDStatus.RESERVED)
        assert logger.info.call_count == calls
        # A database failure is re-raised and logged with the pid attached
        # in the logging 'extra' dict.
        with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
            mock.side_effect = SQLAlchemyError()
            pytest.raises(SQLAlchemyError, pid.sync_status, PIDStatus.NEW)
            assert logger.exception.call_args[0][0].startswith(
                "Failed to sync status")
            assert 'pid' in logger.exception.call_args[1]['extra']
def test_repr(app, db):
    """Check the string representation of persistent identifiers."""
    with app.app_context():
        # A PID bound to an object renders the object reference as well.
        bound = PersistentIdentifier.create(
            'recid', '1', status=PIDStatus.REGISTERED, object_type='rec',
            object_uuid='de3bb351-bc1a-4e51-8605-c6cd9589a560')
        expected = (
            "<PersistentIdentifier recid:1 / "
            "rec:de3bb351-bc1a-4e51-8605-c6cd9589a560 (R)>"
        )
        assert str(bound) == expected
        # An unbound PID renders only pid type, value and status code.
        unbound = PersistentIdentifier.create(
            'recid', '2', status=PIDStatus.REGISTERED)
        assert str(unbound) == "<PersistentIdentifier recid:2 (R)>"
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Ironic SSH power driver."""
import tempfile
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import uuidutils
import paramiko
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import console_utils
from ironic.drivers.modules import ssh
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
class SSHValidateParametersTestCase(db_base.DbTestCase):
    """Tests for ssh._parse_driver_info and ssh._get_boot_device_map."""
    def test__parse_driver_info_good_password(self):
        """Password-based driver info yields all expected fields."""
        # make sure we get back the expected things
        node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info('password'))
        info = ssh._parse_driver_info(node)
        self.assertIsNotNone(info.get('host'))
        self.assertIsNotNone(info.get('username'))
        self.assertIsNotNone(info.get('password'))
        self.assertIsNotNone(info.get('port'))
        self.assertIsNotNone(info.get('virt_type'))
        self.assertIsNotNone(info.get('cmd_set'))
        self.assertIsNotNone(info.get('uuid'))
    def test__parse_driver_info_good_key(self):
        """Key-contents-based driver info yields all expected fields."""
        # make sure we get back the expected things
        node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info('key'))
        info = ssh._parse_driver_info(node)
        self.assertIsNotNone(info.get('host'))
        self.assertIsNotNone(info.get('username'))
        self.assertIsNotNone(info.get('key_contents'))
        self.assertIsNotNone(info.get('port'))
        self.assertIsNotNone(info.get('virt_type'))
        self.assertIsNotNone(info.get('cmd_set'))
        self.assertIsNotNone(info.get('uuid'))
    def test__parse_driver_info_good_file(self):
        """Key-file-based driver info yields all expected fields."""
        # make sure we get back the expected things
        d_info = db_utils.get_test_ssh_info('file')
        tempdir = tempfile.mkdtemp()
        key_path = tempdir + '/foo'
        # An empty file is enough: only the file's existence is checked.
        open(key_path, 'wt').close()
        d_info['ssh_key_filename'] = key_path
        node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=d_info)
        info = ssh._parse_driver_info(node)
        self.assertIsNotNone(info.get('host'))
        self.assertIsNotNone(info.get('username'))
        self.assertIsNotNone(info.get('key_filename'))
        self.assertIsNotNone(info.get('port'))
        self.assertIsNotNone(info.get('virt_type'))
        self.assertIsNotNone(info.get('cmd_set'))
        self.assertIsNotNone(info.get('uuid'))
    def test__parse_driver_info_bad_file(self):
        """A non-existent ssh key filename is rejected."""
        # A filename that doesn't exist errors.
        info = db_utils.get_test_ssh_info('file')
        node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=info)
        self.assertRaises(
            exception.InvalidParameterValue, ssh._parse_driver_info, node)
    def test__parse_driver_info_too_many(self):
        """Supplying multiple credential types at once is rejected."""
        info = db_utils.get_test_ssh_info('too_many')
        node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=info)
        self.assertRaises(
            exception.InvalidParameterValue, ssh._parse_driver_info, node)
    def test__parse_driver_info_missing_host(self):
        """Missing ssh_address raises MissingParameterValue."""
        # make sure error is raised when info is missing
        info = db_utils.get_test_ssh_info()
        del info['ssh_address']
        node = obj_utils.get_test_node(self.context, driver_info=info)
        self.assertRaises(exception.MissingParameterValue,
                          ssh._parse_driver_info,
                          node)
    def test__parse_driver_info_missing_user(self):
        """Missing ssh_username raises MissingParameterValue."""
        # make sure error is raised when info is missing
        info = db_utils.get_test_ssh_info()
        del info['ssh_username']
        node = obj_utils.get_test_node(self.context, driver_info=info)
        self.assertRaises(exception.MissingParameterValue,
                          ssh._parse_driver_info,
                          node)
    def test__parse_driver_info_invalid_creds(self):
        """Driver info with no credentials at all is rejected."""
        # make sure error is raised when info is missing
        info = db_utils.get_test_ssh_info('no-creds')
        node = obj_utils.get_test_node(self.context, driver_info=info)
        self.assertRaises(exception.InvalidParameterValue,
                          ssh._parse_driver_info,
                          node)
    def test__parse_driver_info_missing_virt_type(self):
        """Missing ssh_virt_type raises MissingParameterValue."""
        # make sure error is raised when info is missing
        info = db_utils.get_test_ssh_info()
        del info['ssh_virt_type']
        node = obj_utils.get_test_node(self.context, driver_info=info)
        self.assertRaises(exception.MissingParameterValue,
                          ssh._parse_driver_info,
                          node)
    def test__parse_driver_info_ssh_port_wrong_type(self):
        """A non-integer ssh_port raises InvalidParameterValue."""
        # make sure error is raised when ssh_port is not integer
        info = db_utils.get_test_ssh_info()
        info['ssh_port'] = 'wrong_port_value'
        node = obj_utils.get_test_node(self.context, driver_info=info)
        self.assertRaises(exception.InvalidParameterValue,
                          ssh._parse_driver_info,
                          node)
    def test__parse_driver_info_with_custom_libvirt_uri(self):
        """The configured libvirt URI ends up in the virsh base command."""
        CONF.set_override('libvirt_uri', 'qemu:///foo', 'ssh')
        expected_base_cmd = "LC_ALL=C /usr/bin/virsh --connect qemu:///foo"
        node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info())
        node['driver_info']['ssh_virt_type'] = 'virsh'
        info = ssh._parse_driver_info(node)
        self.assertEqual(expected_base_cmd, info['cmd_set']['base_cmd'])
    def test__get_boot_device_map_parallels(self):
        """PXE maps to 'net0' for the parallels virt type."""
        boot_map = ssh._get_boot_device_map('parallels')
        self.assertEqual('net0', boot_map[boot_devices.PXE])
    def test__get_boot_device_map_vbox(self):
        """PXE maps to 'net' for the vbox virt type."""
        boot_map = ssh._get_boot_device_map('vbox')
        self.assertEqual('net', boot_map[boot_devices.PXE])
    def test__get_boot_device_map_xenserver(self):
        """PXE maps to 'n' for the xenserver virt type."""
        boot_map = ssh._get_boot_device_map('xenserver')
        self.assertEqual('n', boot_map[boot_devices.PXE])
    def test__get_boot_device_map_exception(self):
        """An unknown virt type raises InvalidParameterValue."""
        self.assertRaises(exception.InvalidParameterValue,
                          ssh._get_boot_device_map,
                          'this_doesn_t_exist')
class SSHPrivateMethodsTestCase(db_base.DbTestCase):
    """Tests for the module-private helpers of the SSH power driver."""
    def setUp(self):
        """Build a test node and a paramiko client used by every test."""
        super(SSHPrivateMethodsTestCase, self).setUp()
        self.node = obj_utils.get_test_node(
            self.context,
            driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info())
        self.sshclient = paramiko.SSHClient()
    @mock.patch.object(utils, 'ssh_connect', autospec=True)
    def test__get_connection_client(self, ssh_connect_mock):
        """_get_connection returns the client built from the driver info."""
        ssh_connect_mock.return_value = self.sshclient
        client = ssh._get_connection(self.node)
        self.assertEqual(self.sshclient, client)
        driver_info = ssh._parse_driver_info(self.node)
        ssh_connect_mock.assert_called_once_with(driver_info)
    @mock.patch.object(utils, 'ssh_connect', autospec=True)
    def test__get_connection_exception(self, ssh_connect_mock):
        """A connect failure propagates as SSHConnectFailed."""
        ssh_connect_mock.side_effect = iter(
            [exception.SSHConnectFailed(host='fake')])
        self.assertRaises(exception.SSHConnectFailed,
                          ssh._get_connection,
                          self.node)
        driver_info = ssh._parse_driver_info(self.node)
        ssh_connect_mock.assert_called_once_with(driver_info)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__ssh_execute(self, exec_ssh_mock):
        """_ssh_execute splits stdout into a list of lines."""
        ssh_cmd = "somecmd"
        expected = ['a', 'b', 'c']
        exec_ssh_mock.return_value = ('\n'.join(expected), '')
        lst = ssh._ssh_execute(self.sshclient, ssh_cmd)
        exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
        self.assertEqual(expected, lst)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__ssh_execute_exception(self, exec_ssh_mock):
        """A process failure is translated into SSHCommandFailed."""
        ssh_cmd = "somecmd"
        exec_ssh_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(exception.SSHCommandFailed,
                          ssh._ssh_execute,
                          self.sshclient,
                          ssh_cmd)
        exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__get_power_status_on_unquoted(self, get_hosts_name_mock,
                                           exec_ssh_mock):
        """POWER_ON is detected even when the VM name is not quoted."""
        info = ssh._parse_driver_info(self.node)
        exec_ssh_mock.return_value = (
            'ExactNodeName', '')
        get_hosts_name_mock.return_value = "ExactNodeName"
        pstate = ssh._get_power_status(self.sshclient, info)
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_running'])
        self.assertEqual(states.POWER_ON, pstate)
        exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__get_power_status_on(self, get_hosts_name_mock, exec_ssh_mock):
        """The node is POWER_ON when its name is in the running list."""
        info = ssh._parse_driver_info(self.node)
        exec_ssh_mock.return_value = (
            '"NodeName" {b43c4982-110c-4c29-9325-d5f41b053513}', '')
        get_hosts_name_mock.return_value = "NodeName"
        pstate = ssh._get_power_status(self.sshclient, info)
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_running'])
        self.assertEqual(states.POWER_ON, pstate)
        exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__get_power_status_off(self, get_hosts_name_mock, exec_ssh_mock):
        """The node is POWER_OFF when its name is absent from the list."""
        info = ssh._parse_driver_info(self.node)
        exec_ssh_mock.return_value = (
            '"NodeName" {b43c4982-110c-4c29-9325-d5f41b053513}', '')
        get_hosts_name_mock.return_value = "NotNodeName"
        pstate = ssh._get_power_status(self.sshclient, info)
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_running'])
        self.assertEqual(states.POWER_OFF, pstate)
        exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__get_power_status_exception(self, exec_ssh_mock):
        """An ssh failure while querying state raises SSHCommandFailed."""
        info = ssh._parse_driver_info(self.node)
        exec_ssh_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(exception.SSHCommandFailed,
                          ssh._get_power_status,
                          self.sshclient,
                          info)
        # The failure happens on the very first command issued, which
        # lists all nodes.
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_all'])
        exec_ssh_mock.assert_called_once_with(
            self.sshclient, ssh_cmd)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__get_power_status_correct_node(self, get_hosts_name_mock,
                                            exec_ssh_mock):
        # Bug: #1397834 test that get_power_status return status of
        # baremeta_1 (off) and not baremetal_11 (on)
        info = ssh._parse_driver_info(self.node)
        exec_ssh_mock.return_value = ('"baremetal_11"\n"seed"\n', '')
        get_hosts_name_mock.return_value = "baremetal_1"
        pstate = ssh._get_power_status(self.sshclient, info)
        # "baremetal_1" must not be matched as a substring of
        # "baremetal_11".
        self.assertEqual(states.POWER_OFF, pstate)
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_running'])
        exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__get_hosts_name_for_node_match(self, exec_ssh_mock):
        """The VM whose MACs overlap the node's MACs is found by name."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_all'])
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['get_node_macs'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        # First call lists VM names, second returns that VM's MACs.
        exec_ssh_mock.side_effect = iter([('NodeName', ''),
                                          ('52:54:00:cf:2d:31', '')])
        expected = [mock.call(self.sshclient, ssh_cmd),
                    mock.call(self.sshclient, cmd_to_exec)]
        found_name = ssh._get_hosts_name_for_node(self.sshclient, info)
        self.assertEqual('NodeName', found_name)
        self.assertEqual(expected, exec_ssh_mock.call_args_list)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__get_hosts_name_for_node_no_match(self, exec_ssh_mock):
        """NodeNotFound is raised after all lookup attempts fail."""
        # Limit to two quick attempts so the test stays fast.
        self.config(group='ssh', get_vm_name_attempts=2)
        self.config(group='ssh', get_vm_name_retry_interval=0)
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "22:22:22:22:22:22"]
        # The reported MAC never matches the node's MACs, on either attempt.
        exec_ssh_mock.side_effect = iter([('NodeName', ''),
                                          ('52:54:00:cf:2d:31', '')] * 2)
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_all'])
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['get_node_macs'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        expected = [mock.call(self.sshclient, ssh_cmd),
                    mock.call(self.sshclient, cmd_to_exec)] * 2
        self.assertRaises(exception.NodeNotFound,
                          ssh._get_hosts_name_for_node, self.sshclient, info)
        self.assertEqual(expected, exec_ssh_mock.call_args_list)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__get_hosts_name_for_node_match_after_retry(self, exec_ssh_mock):
        """A transient empty MAC list succeeds on the retry."""
        self.config(group='ssh', get_vm_name_attempts=2)
        self.config(group='ssh', get_vm_name_retry_interval=0)
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "22:22:22:22:22:22"]
        # Attempt 1 returns no MACs; attempt 2 returns a matching MAC.
        exec_ssh_mock.side_effect = iter([('NodeName', ''),
                                          ('', ''),
                                          ('NodeName', ''),
                                          ('11:11:11:11:11:11', '')])
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_all'])
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['get_node_macs'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        expected = [mock.call(self.sshclient, ssh_cmd),
                    mock.call(self.sshclient, cmd_to_exec)] * 2
        found_name = ssh._get_hosts_name_for_node(self.sshclient, info)
        self.assertEqual('NodeName', found_name)
        self.assertEqual(expected, exec_ssh_mock.call_args_list)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    def test__get_hosts_name_for_node_exception(self, exec_ssh_mock):
        """An ssh failure during MAC lookup raises SSHCommandFailed."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
                             info['cmd_set']['list_all'])
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['get_node_macs'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        # Listing succeeds; fetching the VM's MACs blows up.
        exec_ssh_mock.side_effect = iter(
            [('NodeName', ''), processutils.ProcessExecutionError])
        expected = [mock.call(self.sshclient, ssh_cmd),
                    mock.call(self.sshclient, cmd_to_exec)]
        self.assertRaises(exception.SSHCommandFailed,
                          ssh._get_hosts_name_for_node,
                          self.sshclient,
                          info)
        self.assertEqual(expected, exec_ssh_mock.call_args_list)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__power_on_good(self, get_hosts_name_mock, get_power_status_mock,
                            exec_ssh_mock):
        """_power_on returns POWER_ON when the VM actually comes up."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # Status is checked before (off) and after (on) the start command.
        get_power_status_mock.side_effect = iter([states.POWER_OFF,
                                                  states.POWER_ON])
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['start_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_on(self.sshclient, info)
        self.assertEqual(states.POWER_ON, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__power_on_fail(self, get_hosts_name_mock, get_power_status_mock,
                            exec_ssh_mock):
        """_power_on returns ERROR when the VM stays powered off."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # The VM remains off even after the start command was sent.
        get_power_status_mock.side_effect = iter([states.POWER_OFF,
                                                  states.POWER_OFF])
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['start_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_on(self.sshclient, info)
        self.assertEqual(states.ERROR, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__power_on_exception(self, get_hosts_name_mock,
                                 get_power_status_mock, exec_ssh_mock):
        """A failing start command raises SSHCommandFailed."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        exec_ssh_mock.side_effect = processutils.ProcessExecutionError
        get_power_status_mock.side_effect = iter([states.POWER_OFF,
                                                  states.POWER_ON])
        get_hosts_name_mock.return_value = "NodeName"
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['start_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        self.assertRaises(exception.SSHCommandFailed,
                          ssh._power_on,
                          self.sshclient,
                          info)
        # The error aborts before the second status check.
        get_power_status_mock.assert_called_once_with(self.sshclient, info)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__power_off_good(self, get_hosts_name_mock,
                             get_power_status_mock, exec_ssh_mock):
        """_power_off returns POWER_OFF when the VM actually goes down."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # Status is checked before (on) and after (off) the stop command.
        get_power_status_mock.side_effect = iter([states.POWER_ON,
                                                  states.POWER_OFF])
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['stop_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_off(self.sshclient, info)
        self.assertEqual(states.POWER_OFF, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__power_off_fail(self, get_hosts_name_mock,
                             get_power_status_mock, exec_ssh_mock):
        """_power_off returns ERROR when the VM stays powered on."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # The VM remains on even after the stop command was sent.
        get_power_status_mock.side_effect = iter([states.POWER_ON,
                                                  states.POWER_ON])
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['stop_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_off(self.sshclient, info)
        self.assertEqual(states.ERROR, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test__power_off_exception(self, get_hosts_name_mock,
                                  get_power_status_mock, exec_ssh_mock):
        """A failing stop command raises SSHCommandFailed."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        exec_ssh_mock.side_effect = processutils.ProcessExecutionError
        get_power_status_mock.side_effect = iter([states.POWER_ON,
                                                  states.POWER_OFF])
        get_hosts_name_mock.return_value = "NodeName"
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['stop_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        self.assertRaises(exception.SSHCommandFailed, ssh._power_off,
                          self.sshclient, info)
        # The error aborts before the second status check.
        get_power_status_mock.assert_called_once_with(self.sshclient, info)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
class SSHDriverTestCase(db_base.DbTestCase):
    def setUp(self):
        """Create a fake_ssh node with one port and a paramiko client."""
        super(SSHDriverTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ssh")
        self.driver = driver_factory.get_driver("fake_ssh")
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info())
        # A port supplies the MAC addresses the driver matches VMs by.
        self.port = obj_utils.create_test_port(self.context,
                                               node_id=self.node.id)
        self.sshclient = paramiko.SSHClient()
    @mock.patch.object(utils, 'ssh_connect', autospec=True)
    def test__validate_info_ssh_connect_failed(self, ssh_connect_mock):
        """An SSH connect failure surfaces as InvalidParameterValue."""
        ssh_connect_mock.side_effect = iter(
            [exception.SSHConnectFailed(host='fake')])
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.power.validate, task)
            driver_info = ssh._parse_driver_info(task.node)
            ssh_connect_mock.assert_called_once_with(driver_info)
    def test_get_properties(self):
        """Power/management expose COMMON_PROPERTIES; console adds its own."""
        expected = ssh.COMMON_PROPERTIES
        expected2 = list(ssh.COMMON_PROPERTIES) + list(ssh.CONSOLE_PROPERTIES)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.power.get_properties())
            self.assertEqual(expected, task.driver.management.get_properties())
            # Order is irrelevant; compare sorted key lists.
            self.assertEqual(
                sorted(expected2),
                sorted(task.driver.console.get_properties().keys()))
            self.assertEqual(
                sorted(expected2),
                sorted(task.driver.get_properties().keys()))
    def test_validate_fail_no_port(self):
        """Power validation fails for a node that has no ports."""
        # Unlike self.node (created with a port in setUp), this node has
        # no port, so no MAC addresses are available.
        new_node = obj_utils.create_test_node(
            self.context,
            uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
            driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info())
        with task_manager.acquire(self.context, new_node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.power.validate,
                              task)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_power_on', autospec=True)
    def test_reboot_good(self, power_on_mock, get_conn_mock,
                         get_mac_addr_mock):
        """reboot() succeeds when _power_on reports POWER_ON afterwards."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_on_mock.return_value = states.POWER_ON
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                task.driver.power.reboot(task)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_power_on', autospec=True)
    def test_reboot_fail(self, power_on_mock, get_conn_mock,
                         get_mac_addr_mock):
        """reboot() raises PowerStateFailure if the node stays off."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        # _power_on reports the node is still off after the reboot attempt.
        power_on_mock.return_value = states.POWER_OFF
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(exception.PowerStateFailure,
                                  task.driver.power.reboot, task)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    def test_set_power_state_bad_state(self, get_conn_mock,
                                       get_mac_addr_mock):
        """An unknown power state value raises InvalidParameterValue."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(
                    exception.InvalidParameterValue,
                    task.driver.power.set_power_state,
                    task,
                    "BAD_PSTATE")
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_power_on', autospec=True)
    def test_set_power_state_on_good(self, power_on_mock, get_conn_mock,
                                     get_mac_addr_mock):
        """set_power_state(POWER_ON) succeeds when the node powers up."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_on_mock.return_value = states.POWER_ON
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                task.driver.power.set_power_state(task, states.POWER_ON)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_power_on', autospec=True)
    def test_set_power_state_on_fail(self, power_on_mock, get_conn_mock,
                                     get_mac_addr_mock):
        """set_power_state(POWER_ON) raises if the node stays off."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        # _power_on reports the node did not come up.
        power_on_mock.return_value = states.POWER_OFF
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(
                    exception.PowerStateFailure,
                    task.driver.power.set_power_state,
                    task,
                    states.POWER_ON)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_power_off', autospec=True)
    def test_set_power_state_off_good(self, power_off_mock, get_conn_mock,
                                      get_mac_addr_mock):
        """set_power_state(POWER_OFF) succeeds when the node powers down."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_off_mock.return_value = states.POWER_OFF
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                task.driver.power.set_power_state(task, states.POWER_OFF)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_off_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(driver_utils, 'get_node_mac_addresses', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_power_off', autospec=True)
    def test_set_power_state_off_fail(self, power_off_mock, get_conn_mock,
                                      get_mac_addr_mock):
        """set_power_state(POWER_OFF) raises if the node stays on."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        # _power_off reports the node did not go down.
        power_off_mock.return_value = states.POWER_ON
        with mock.patch.object(ssh, '_parse_driver_info',
                               autospec=True) as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(
                    exception.PowerStateFailure,
                    task.driver.power.set_power_state,
                    task,
                    states.POWER_OFF)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_off_mock.assert_called_once_with(self.sshclient, info)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_set_boot_device_vbox_ok(self, mock_exc,
                                                          mock_h,
                                                          mock_get_conn):
        """set_boot_device(PXE) issues the expected VBoxManage modifyvm cmd."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
        expected_cmd = ('LC_ALL=C /usr/bin/VBoxManage modifyvm %s '
                        '--boot1 net') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_set_boot_device_vbox_with_power_on(
            self, mock_exc, mock_h, mock_get_conn, mock_get_power):
        """A powered-on vbox VM is powered off before the boot device is set."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        # NOTE(jroll) _power_off calls _get_power_state twice
        mock_get_power.side_effect = [
            states.POWER_ON, states.POWER_ON, states.POWER_OFF
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            task.node['driver_info']['vbox_use_headless'] = True
            self.driver.management.set_boot_device(task, boot_devices.PXE)
        # Both the poweroff and the modifyvm command must run, in this order.
        expected_cmds = [
            mock.call(mock.ANY,
                      'LC_ALL=C /usr/bin/VBoxManage '
                      'controlvm %s poweroff' % fake_name),
            mock.call(mock.ANY,
                      'LC_ALL=C /usr/bin/VBoxManage '
                      'modifyvm %s --boot1 net' % fake_name)
        ]
        self.assertEqual(expected_cmds, mock_exc.call_args_list)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_set_boot_device_parallels_ok(self, mock_exc,
                                                               mock_h,
                                                               mock_get_conn):
        """set_boot_device(PXE) issues the expected prlctl set command."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'parallels'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
        expected_cmd = ('LC_ALL=C /usr/bin/prlctl set %s '
                        '--device-bootorder "net0"') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_set_boot_device_virsh_ok(self, mock_exc,
                                                           mock_h,
                                                           mock_get_conn):
        """set_boot_device(PXE) edits the domain XML via virsh edit + sed."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'virsh'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
        # The sed EDITOR deletes any existing <boot dev=/order=> element and
        # inserts <boot dev="network"/> before </os>.
        expected_cmd = ('EDITOR="sed -i \'/<boot \\(dev\\|order\\)=*\\>'
                        '/d;/<\\/os>/i\\<boot dev=\\"network\\"/>\'" '
                        'LC_ALL=C /usr/bin/virsh --connect qemu:///system '
                        'edit %s') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_set_boot_device_xenserver_ok(self,
                                                               mock_exc,
                                                               mock_h,
                                                               mock_get_conn):
        """set_boot_device(PXE) issues the expected xe vm-param-set command."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'xenserver'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
        expected_cmd = ("LC_ALL=C /opt/xensource/bin/xe vm-param-set uuid=%s "
                        "HVM-boot-params:order='n'") % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
def test_set_boot_device_bad_device(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.management.set_boot_device,
task, 'invalid-device')
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test_set_boot_device_not_supported(self, mock_h, mock_get_conn):
        """Virt types without boot-device support raise NotImplementedError."""
        mock_h.return_value = 'NodeName'
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # vmware does not support set_boot_device()
            task.node['driver_info']['ssh_virt_type'] = 'vmware'
            self.assertRaises(NotImplementedError,
                              self.driver.management.set_boot_device,
                              task, boot_devices.PXE)
def test_management_interface_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM]
self.assertEqual(sorted(expected), sorted(task.driver.management.
get_supported_boot_devices(task)))
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_get_boot_device_vbox(self, mock_exc,
                                                       mock_h,
                                                       mock_get_conn):
        """A vbox 'net' boot entry is reported back as PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        # _ssh_execute returns a (stdout, stderr) pair; 'net' maps to PXE.
        mock_exc.return_value = ('net', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
        expected_cmd = ('LC_ALL=C /usr/bin/VBoxManage showvminfo '
                        '--machinereadable %s '
                        '| awk -F \'"\' \'/boot1/{print $2}\'') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_get_boot_device_parallels(self, mock_exc,
                                                            mock_h,
                                                            mock_get_conn):
        """A parallels 'net0' boot entry is reported back as PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        # _ssh_execute returns a (stdout, stderr) pair; 'net0' maps to PXE.
        mock_exc.return_value = ('net0', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'parallels'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
        expected_cmd = ('LC_ALL=C /usr/bin/prlctl list -i %s '
                        '| awk \'/^Boot order:/ {print $3}\'') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_get_boot_device_virsh(self, mock_exc,
                                                        mock_h,
                                                        mock_get_conn):
        """A virsh 'network' boot entry is reported back as PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        # _ssh_execute returns a (stdout, stderr) pair; 'network' maps to PXE.
        mock_exc.return_value = ('network', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'virsh'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
        # The awk program extracts the dev= value of the first <boot> element
        # from the dumped domain XML.
        expected_cmd = ('LC_ALL=C /usr/bin/virsh --connect '
                        'qemu:///system dumpxml %s | awk \'/boot dev=/ '
                        '{ gsub( ".*dev=" Q, "" ); gsub( Q ".*", "" ); '
                        'print; }\' Q="\'" RS="[<>]" | head -1') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_management_interface_get_boot_device_xenserver(self, mock_exc,
                                                            mock_h,
                                                            mock_get_conn):
        """A xenserver 'n' boot-order entry is reported back as PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        # _ssh_execute returns a (stdout, stderr) pair; 'n' maps to PXE.
        mock_exc.return_value = ('n', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'xenserver'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
        expected_cmd = ('LC_ALL=C /opt/xensource/bin/xe vm-param-get '
                        'uuid=%s --param-name=HVM-boot-params '
                        'param-key=order | cut -b 1') % fake_name
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    def test_get_boot_device_not_supported(self, mock_h, mock_get_conn):
        """Unsupported virt types return a None boot device, not an error."""
        mock_h.return_value = 'NodeName'
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # vmware does not support get_boot_device()
            task.node['driver_info']['ssh_virt_type'] = 'vmware'
            expected = {'boot_device': None, 'persistent': None}
            self.assertEqual(expected,
                             self.driver.management.get_boot_device(task))
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_get_power_state_vmware(self, mock_exc, mock_h, mock_get_conn):
        """get_power_state substitutes the node name into vmware's command."""
        # To see replacing {_NodeName_} in vmware's list_running
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        # list_running quotes names
        mock_exc.return_value = ('"%s"' % nodename, '')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vmware'
            power_state = self.driver.power.get_power_state(task)
        self.assertEqual(states.POWER_ON, power_state)
        expected_cmd = ("LC_ALL=C /bin/vim-cmd vmsvc/power.getstate "
                        "%(node)s | grep 'Powered on' >/dev/null && "
                        "echo '\"%(node)s\"' || true") % {'node': nodename}
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    def test_get_power_state_xenserver(self, mock_exc, mock_h, mock_get_conn):
        """get_power_state reports ON when xe lists the node as running."""
        # To see replacing {_NodeName_} in xenserver's list_running
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        mock_exc.return_value = (nodename, '')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'xenserver'
            power_state = self.driver.power.get_power_state(task)
        self.assertEqual(states.POWER_ON, power_state)
        expected_cmd = ("LC_ALL=C /opt/xensource/bin/xe "
                        "vm-list power-state=running --minimal | tr ',' '\n'")
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    def test_start_command_xenserver(self, mock_power, mock_exc, mock_h,
                                     mock_get_conn):
        """Powering on a xenserver node runs the expected xe vm-start cmd."""
        # First status check sees the node off, second confirms it came on.
        mock_power.side_effect = [states.POWER_OFF, states.POWER_ON]
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'xenserver'
            self.driver.power.set_power_state(task, states.POWER_ON)
        expected_cmd = ("LC_ALL=C /opt/xensource/bin/xe "
                        "vm-start uuid=fakevm && sleep 10s")
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    def test_stop_command_xenserver(self, mock_power, mock_exc, mock_h,
                                    mock_get_conn):
        """Powering off a xenserver node runs the expected xe vm-shutdown."""
        # First status check sees the node on, second confirms it went off.
        mock_power.side_effect = [states.POWER_ON, states.POWER_OFF]
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'xenserver'
            self.driver.power.set_power_state(task, states.POWER_OFF)
        expected_cmd = ("LC_ALL=C /opt/xensource/bin/xe "
                        "vm-shutdown uuid=fakevm force=true")
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    def test_start_command_vbox(self, mock_power, mock_exc, mock_h,
                                mock_get_conn):
        """Powering on a vbox node runs a plain (GUI-mode) startvm command."""
        # First status check sees the node off, second confirms it came on.
        mock_power.side_effect = [states.POWER_OFF, states.POWER_ON]
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            self.driver.power.set_power_state(task, states.POWER_ON)
        expected_cmd = 'LC_ALL=C /usr/bin/VBoxManage startvm fakevm'
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(ssh, '_ssh_execute', autospec=True)
    @mock.patch.object(ssh, '_get_power_status', autospec=True)
    def test_start_command_vbox_headless(self, mock_power, mock_exc, mock_h,
                                         mock_get_conn):
        """vbox_use_headless=True appends '--type headless' to startvm."""
        # First status check sees the node off, second confirms it came on.
        mock_power.side_effect = [states.POWER_OFF, states.POWER_ON]
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            task.node['driver_info']['vbox_use_headless'] = True
            self.driver.power.set_power_state(task, states.POWER_ON)
        expected_cmd = ('LC_ALL=C /usr/bin/VBoxManage '
                        'startvm fakevm --type headless')
        mock_exc.assert_called_once_with(mock.ANY, expected_cmd)
    def test_management_interface_validate_good(self):
        """validate() passes with the default (complete) SSH driver_info."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.management.validate(task)
    def test_management_interface_validate_fail(self):
        """validate() raises MissingParameterValue without SSH driver_info."""
        # Missing SSH driver_info information
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake_ssh')
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.management.validate, task)
    def test_console_validate(self):
        """Console validate() passes for virsh with a terminal port set."""
        with task_manager.acquire(
                self.context, self.node.uuid, shared=True) as task:
            task.node.driver_info['ssh_virt_type'] = 'virsh'
            task.node.driver_info['ssh_terminal_port'] = 123
            task.driver.console.validate(task)
    def test_console_validate_missing_port(self):
        """Console validate() requires ssh_terminal_port to be present."""
        with task_manager.acquire(
                self.context, self.node.uuid, shared=True) as task:
            task.node.driver_info['ssh_virt_type'] = 'virsh'
            task.node.driver_info.pop('ssh_terminal_port', None)
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.console.validate, task)
    def test_console_validate_not_virsh(self):
        """Console validate() rejects non-virsh virt types."""
        with task_manager.acquire(
                self.context, self.node.uuid, shared=True) as task:
            task.node.driver_info = db_utils.get_test_ssh_info(
                virt_type='vbox')
            self.assertRaisesRegex(exception.InvalidParameterValue,
                                   'not supported for non-virsh types',
                                   task.driver.console.validate, task)
    def test_console_validate_invalid_port(self):
        """Console validate() rejects a non-integer terminal port."""
        with task_manager.acquire(
                self.context, self.node.uuid, shared=True) as task:
            task.node.driver_info['ssh_terminal_port'] = ''
            self.assertRaisesRegex(exception.InvalidParameterValue,
                                   'is not a valid integer',
                                   task.driver.console.validate, task)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(console_utils, 'start_shellinabox_console',
                       autospec=True)
    def test_start_console(self, mock_exec,
                           get_hosts_name_mock, mock_get_conn):
        """start_console launches shellinabox for the node's terminal port."""
        info = ssh._parse_driver_info(self.node)
        mock_exec.return_value = None
        get_hosts_name_mock.return_value = "NodeName"
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.console.start_console(task)
        mock_exec.assert_called_once_with(info['uuid'],
                                          info['terminal_port'],
                                          mock.ANY)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(console_utils, 'start_shellinabox_console',
                       autospec=True)
    def test_start_console_fail(self, mock_exec,
                                get_hosts_name_mock, mock_get_conn):
        """ConsoleSubprocessFailed from shellinabox propagates to the caller."""
        get_hosts_name_mock.return_value = "NodeName"
        mock_get_conn.return_value = self.sshclient
        mock_exec.side_effect = exception.ConsoleSubprocessFailed(
            error='error')
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.ConsoleSubprocessFailed,
                              self.driver.console.start_console,
                              task)
        mock_exec.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY)
    @mock.patch.object(ssh, '_get_connection', autospec=True)
    @mock.patch.object(ssh, '_get_hosts_name_for_node', autospec=True)
    @mock.patch.object(console_utils, 'start_shellinabox_console',
                       autospec=True)
    def test_start_console_fail_nodir(self, mock_exec,
                                      get_hosts_name_mock, mock_get_conn):
        """ConsoleError from shellinabox propagates to the caller."""
        get_hosts_name_mock.return_value = "NodeName"
        mock_get_conn.return_value = self.sshclient
        mock_exec.side_effect = exception.ConsoleError()
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.ConsoleError,
                              self.driver.console.start_console,
                              task)
        mock_exec.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY)
    @mock.patch.object(console_utils, 'stop_shellinabox_console',
                       autospec=True)
    def test_stop_console(self, mock_exec):
        """stop_console tears down the shellinabox process for the node."""
        mock_exec.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.console.stop_console(task)
        mock_exec.assert_called_once_with(self.node.uuid)
    @mock.patch.object(console_utils, 'stop_shellinabox_console',
                       autospec=True)
    def test_stop_console_fail(self, mock_stop):
        """ConsoleError from stopping shellinabox propagates to the caller."""
        mock_stop.side_effect = exception.ConsoleError()
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.ConsoleError,
                              self.driver.console.stop_console,
                              task)
        mock_stop.assert_called_once_with(self.node.uuid)
    @mock.patch.object(console_utils, 'get_shellinabox_console_url',
                       autospec=True)
    def test_get_console(self, mock_exec):
        """get_console returns the shellinabox URL for the terminal port."""
        url = 'http://localhost:4201'
        mock_exec.return_value = url
        expected = {'type': 'shellinabox', 'url': url}
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            task.node.driver_info['ssh_terminal_port'] = 6900
            console_info = self.driver.console.get_console(task)
        self.assertEqual(expected, console_info)
        mock_exec.assert_called_once_with(6900)
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common client library functions and classes used by all products."""
import os
import sys
import warnings
import httplib2
import socks
import suds
import yaml
import googleads.errors
import googleads.oauth2
# Library version, reported to Google's servers via the user agent.
VERSION = '3.7.0'

# Signature fragment identifying this common library in user agents.
_COMMON_LIB_SIG = 'googleads/%s' % VERSION

# Top-level yaml key under which proxy settings are stored.
_PROXY_YAML_KEY = 'proxy_info'

# Interpreter version fragment for the user agent, e.g. 'Python/2.7.10'.
_PYTHON_VERSION = 'Python/%d.%d.%d' % (sys.version_info[0], sys.version_info[1],
                                       sys.version_info[2])

# The keys in the authentication dictionary that are used to construct OAuth 2.0
# credentials.
_OAUTH_2_AUTH_KEYS = ('client_id', 'client_secret', 'refresh_token')

# The keys in the proxy dictionary that are used to construct a ProxyInfo
# instance.
_PROXY_KEYS = ('host', 'port')
def GenerateLibSig(short_name):
  """Generates a library signature suitable for a user agent field.

  Args:
    short_name: The short, product-specific string name for the library.

  Returns:
    A library signature string to append to user-supplied user-agent value.
  """
  # Join the product name, common library signature and interpreter version
  # into a single parenthesized fragment, e.g. ' (dfp, googleads/3.7.0, ...)'.
  components = (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)
  return ' (%s)' % ', '.join(components)
def LoadFromStorage(path, product_yaml_key, required_client_values,
                    optional_product_values):
  """Loads the data necessary for instantiating a client from file storage.

  In addition to the required_client_values argument, the yaml file must supply
  the keys used to create OAuth 2.0 credentials. It may also optionally provide
  proxy_info in order to configure a proxy.

  Args:
    path: A path string to the yaml document whose keys should be used.
    product_yaml_key: The key to read in the yaml as a string.
    required_client_values: A tuple of strings representing values which must
      be in the yaml file for a supported API. If one of these keys is not in
      the yaml file, an error will be raised.
    optional_product_values: A tuple of strings representing optional values
      which may be in the yaml file.

  Returns:
    A dictionary map of the keys in the yaml file to their values. This will not
    contain the keys used for OAuth 2.0 client creation and instead will have a
    GoogleOAuth2Client object stored in the 'oauth2_client' field.

  Raises:
    A GoogleAdsValueError if the given yaml file does not contain the
    information necessary to instantiate a client object - either a
    required_client_values key was missing or an OAuth 2.0 key was missing.
  """
  if not os.path.isabs(path):
    path = os.path.expanduser(path)

  try:
    with open(path, 'r') as handle:
      data = yaml.safe_load(handle.read())
      product_data = data.get(product_yaml_key) or {}
      proxy_data = data.get(_PROXY_YAML_KEY) or {}
  except IOError:
    raise googleads.errors.GoogleAdsValueError(
        'Given yaml file, %s, could not be opened.' % path)

  # Remember the keys that were actually present so error messages below can
  # report what was found alongside what was required.
  original_keys = list(product_data.keys())
  original_proxy_keys = list(proxy_data.keys())
  client_kwargs = {}

  try:
    for key in required_client_values:
      client_kwargs[key] = product_data[key]
      del product_data[key]
  except KeyError:
    raise googleads.errors.GoogleAdsValueError(
        'Your yaml file, %s, is missing some of the required values. Required '
        'values are: %s, actual values are %s'
        % (path, required_client_values, original_keys))

  try:
    # Proxy configuration is optional; when present, both host and port are
    # required (a missing key raises KeyError and is reported below).
    proxy_info = (httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, proxy_data['host'],
                                     proxy_data['port'])
                  if proxy_data else None)
    client_kwargs['https_proxy'] = ('%s:%s' % (proxy_info.proxy_host,
                                               proxy_info.proxy_port)
                                    if proxy_info else None)
  except KeyError:
    raise googleads.errors.GoogleAdsValueError(
        # BUG FIX: the two fragments previously concatenated without a
        # separating space, producing "proxy values.Required values".
        'Your yaml file, %s, is missing some of the required proxy values. '
        'Required values are: %s, actual values are %s'
        % (path, _PROXY_KEYS, original_proxy_keys))

  ca_certs = proxy_data.get('ca_certs', None)
  disable_ssl_certificate_validation = proxy_data.get(
      'disable_ssl_certificate_validation', True)

  try:
    client_kwargs['oauth2_client'] = (
        googleads.oauth2.GoogleRefreshTokenClient(
            product_data['client_id'], product_data['client_secret'],
            product_data['refresh_token'], proxy_info,
            disable_ssl_certificate_validation, ca_certs))
    # The OAuth keys are consumed here; remove them so they are not reported
    # as unrecognized leftovers below.
    for auth_key in _OAUTH_2_AUTH_KEYS:
      del product_data[auth_key]
  except KeyError:
    raise googleads.errors.GoogleAdsValueError(
        'Your yaml file, %s, is missing some of the required OAuth 2.0 '
        'values. Required values are: %s, actual values are %s'
        % (path, _OAUTH_2_AUTH_KEYS, original_keys))

  for value in optional_product_values:
    if value in product_data:
      client_kwargs[value] = product_data[value]
      del product_data[value]

  if product_data:
    warnings.warn(
        'Your yaml file, %s, contains the following unrecognized '
        'keys: %s. They were ignored.' % (path, product_data), stacklevel=3)

  return client_kwargs
def _PackForSuds(obj, factory):
  """Packs SOAP input into the format we want for suds.

  The main goal here is to pack dictionaries with an 'xsi_type' key into
  objects. This allows dictionary syntax to be used even with complex types
  extending other complex types. The contents of dictionaries and lists/tuples
  are recursively packed. Mutable types are copied - we don't mutate the input.

  Args:
    obj: A parameter for a SOAP request which will be packed. If this is
        a dictionary or list, the contents will recursively be packed. If this
        is not a dictionary or list, the contents will be recursively searched
        for instances of unpacked dictionaries or lists.
    factory: The suds.client.Factory object which can create instances of the
        classes generated from the WSDL.

  Returns:
    If the given obj was a dictionary that contained the 'xsi_type' key, this
    will be an instance of a class generated from the WSDL. Otherwise, this will
    be the same data type as the input obj was.
  """
  if obj in ({}, None):
    # Force suds to serialize empty objects. There are legitimate use cases for
    # this, for example passing in an empty SearchCriteria object to a DFA
    # search method in order to select everything.
    return suds.null()
  elif isinstance(obj, dict):
    if 'xsi_type' in obj:
      try:
        new_obj = factory.create(obj['xsi_type'])
      except suds.TypeNotFound:
        # Retry with the default namespace prefix prepended.
        new_obj = factory.create(':'.join(['ns0', obj['xsi_type']]))
      # Suds sends an empty XML element for enum types which are not set. None
      # of Google's Ads APIs will accept this. Initializing all of the fields in
      # a suds object to None will ensure that they don't get serialized at all
      # unless the user sets a value. User values explicitly set to None will be
      # packed into a suds.null() object.
      for param, _ in new_obj:
        # Another problem is that the suds.mx.appender.ObjectAppender won't
        # serialize object types with no fields set, but both AdWords and DFP
        # rely on sending objects with just the xsi:type set. The below "if"
        # statement is an ugly hack that gets this to work in all(?) situations
        # by taking advantage of the fact that these classes generally all have
        # a type field. The only other option is to monkey patch ObjectAppender.
        if param.endswith('.Type'):
          setattr(new_obj, param, obj['xsi_type'])
        else:
          setattr(new_obj, param, None)
      for key in obj:
        if key == 'xsi_type': continue
        setattr(new_obj, key, _PackForSuds(obj[key], factory))
    else:
      # Plain dict: copy it, packing each value recursively.
      new_obj = {}
      for key in obj:
        new_obj[key] = _PackForSuds(obj[key], factory)
    return new_obj
  elif isinstance(obj, (list, tuple)):
    return [_PackForSuds(item, factory) for item in obj]
  else:
    # Non-container value (possibly a suds object): search it in place for
    # unpacked dictionaries/lists and return it unchanged.
    _RecurseOverObject(obj, factory)
    return obj
def _RecurseOverObject(obj, factory, parent=None):
  """Recurses over a nested structure to look for changes in Suds objects.

  Args:
    obj: A parameter for a SOAP request field which is to be inspected and
        will be packed for Suds if an xsi_type is specified, otherwise will be
        left unaltered.
    factory: The suds.client.Factory object which can create instances of the
        classes generated from the WSDL.
    parent: The parent object that contains the obj parameter to be inspected.
  """
  if _IsSudsIterable(obj):
    # Since in-place modification of the Suds object is taking place, the
    # iterator should be done over a frozen copy of the unpacked fields.
    copy_of_obj = tuple(obj)
    for item in copy_of_obj:
      if _IsSudsIterable(item):
        if 'xsi_type' in item:
          if isinstance(obj, tuple):
            # NOTE(review): this branch packs obj[1] (treating the enclosing
            # tuple as a (key, value) pair and writing into parent), not the
            # current item -- presumably intentional for dict items exposed as
            # tuples; confirm against suds object iteration before changing.
            parent[obj[0]] = _PackForSuds(obj[1], factory)
          else:
            # Replace the unpacked dict in-place with its packed equivalent.
            obj.remove(item)
            obj.append(_PackForSuds(item, factory))
        _RecurseOverObject(item, factory, obj)
def _IsSudsIterable(obj):
  """A short helper method to determine if a field is iterable for Suds."""
  # An empty/None value is returned as-is (falsy), mirroring plain truthiness
  # use at the call sites; strings are explicitly excluded from "iterable".
  if not obj:
    return obj
  return not isinstance(obj, basestring) and hasattr(obj, '__iter__')
class SudsServiceProxy(object):
  """Wraps a suds service object, allowing custom logic to be injected.

  This class is responsible for refreshing the HTTP and SOAP headers, so changes
  to the client object will be reflected in future SOAP calls, and for
  transforming SOAP call input parameters, allowing dictionary syntax to be used
  with all SOAP complex types.

  Attributes:
    suds_client: The suds.client.Client this service belongs to. If you are
        familiar with suds and want to use autogenerated classes, you can access
        the client and its factory,
  """

  def __init__(self, suds_client, header_handler):
    """Initializes a suds service proxy.

    Args:
      suds_client: The suds.client.Client whose service will be wrapped. Note
          that this is the client itself, not the client's embedded service
          object.
      header_handler: A HeaderHandler responsible for setting the SOAP and HTTP
          headers on the service client.
    """
    self.suds_client = suds_client
    self._header_handler = header_handler
    # Lazily-populated cache of per-method wrappers built by _CreateMethod.
    self._method_proxies = {}

  def __getattr__(self, attr):
    # SOAP methods declared in the WSDL get a packing/header-refreshing
    # wrapper; everything else is delegated to the raw suds service object.
    # __getattr__ only fires for attributes not found normally, so the
    # instance attributes assigned in __init__ never recurse into here.
    if attr in self.suds_client.wsdl.services[0].ports[0].methods:
      if attr not in self._method_proxies:
        self._method_proxies[attr] = self._CreateMethod(attr)
      return self._method_proxies[attr]
    else:
      return getattr(self.suds_client.service, attr)

  def _CreateMethod(self, method_name):
    """Create a method wrapping an invocation to the SOAP service.

    Args:
      method_name: A string identifying the name of the SOAP method to call.

    Returns:
      A callable that can be used to make the desired SOAP request.
    """
    soap_service_method = getattr(self.suds_client.service, method_name)

    def MakeSoapRequest(*args):
      """Perform a SOAP call."""
      # Refresh headers on every call so client-state changes are picked up.
      self._header_handler.SetHeaders(self.suds_client)
      return soap_service_method(*[_PackForSuds(arg, self.suds_client.factory)
                                   for arg in args])

    return MakeSoapRequest
class HeaderHandler(object):
  """A generic header handler interface that must be subclassed by each API."""

  def SetHeaders(self, client):
    """Sets the SOAP and HTTP headers on the given suds client."""
    # Abstract by convention: each product library provides its own subclass.
    raise NotImplementedError('You must subclass HeaderHandler.')
| |
#!/usr/bin/env python
"""SeqAn code generation from templates / skeletons.
This module contains code to help the creation of modules, tests, apps etc.
It can be called directly or imported and the main() function can be called.
It will perform the following replacements:
%(AUTHOR)s will be replaced by the author's name, either given on command
line or taken from environment variable SEQAN_AUTHOR.
%(NAME)s will be replaced by the name of the generated code.
%(TITLE)s will be replaced by the name of the generated, but centered in
74 characters, to be used in the file header comment.
%(YEAR)d will be replaced by the current year.
%(DATE)s will be replaced by the current date.
%(TIME)s will be replaced by the current time.
%(HEADER_GUARD)s will be replaced by the UPPER_CASE_PATH_H_ to the file.
%(CMAKE_PROJECT_PATH)s will be replaced by lower_case_path to the target
directory.
Copyright: (c) 2010, Knut Reinert, FU Berlin
License: 3-clause BSD (see LICENSE)
"""
from __future__ import with_statement
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
import datetime
import optparse
import os
import os.path
import sys
import string
import paths
# Add os.path.relpath if it is not already there, so we can use Python 2.5, too.
# TODO(holtgrew): This could go into a "compatibility" module.
if not 'relpath' in dir(os.path):
    import posixpath
    from posixpath import curdir, sep, pardir, join

    # Backport of posixpath.relpath for interpreters that lack it (pre-2.6).
    def relpath(path, start=curdir):
        """Return a relative version of a path"""
        if not path:
            raise ValueError("no path specified")
        start_list = posixpath.abspath(start).split(sep)
        path_list = posixpath.abspath(path).split(sep)
        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))
        # Climb out of the unshared part of start, then descend into path.
        rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return curdir
        return join(*rel_list)

    os.path.relpath = relpath
# Length of the header comment.
HEADER_CENTER_WIDTH = 74

# Fallback for author string if neither given on command line nor in the
# environment variable SEQAN_AUTHOR.
DEFAULT_AUTHOR = 'Your Name <your.email@example.net>'

# Program usage string for command line parser.
USAGE = """
Usage: %prog [options] repository NAME
%prog [options] [module|test|app|demo|header|lheader] NAME LOCATION
""".strip()

# Program description, used for command line parser.  Will be re-wrapped by
# the parser, though.
DESCRIPTION = """
The SeqAn code generator.
The first version ("repository") is to be called to create your new entries
below the directory sandbox. The second version is to be called to create new
library modules, tests, apps, and demos inside a sandbox.
""".strip()
#"""
#Example:
#
# %prog repository sandbox/john_doe
#
#The second version is to be called to create new library modules, tests, apps,
#and demos inside a sandbox. Example:
#
# %prog module my_module sandbox/john_doe
#
#This command creates a new library module in sandbox/john_doe/include/seqan.
#It consists of the directory my_module, the files my_module.h and
#my_module/my_module_base.h as well as the info file my_module/INFO.
#
# %prog test my_module sandbox/john_doe
#
#This command creates the tests for module "my_module" in sandbox/john_doe.
#
# %prog app my_app sandbox/john_doe
#
#This command creates a new application named my_app in sandbox/john_doe/apps.
#
# %prog demo my_demo sandbox/john_doe
#
#This command creates a new demo in sandbox/john_doe/demos.
#""".strip()
def createDirectory(path, dry_run=False):
    """Create directory ``path``, announcing the action on stdout.

    In dry-run mode the mkdir is only announced, not performed.
    """
    sys.stdout.write('mkdir(%s)\n' % path)
    sys.stdout.write('\n')
    if dry_run:
        return
    os.mkdir(path)
def configureFile(target_file, source_file, replacements, dry_run):
    """Instantiate ``source_file`` into ``target_file``.

    The source text is expanded with the ``%`` operator against the
    ``replacements`` dict.  Returns 0 on success and 1 if the target
    already exists.
    """
    sys.stdout.write('Configuring file.\n')
    sys.stdout.write(' Source: %s\n' % (source_file,))
    sys.stdout.write(' Target: %s\n' % (target_file,))
    sys.stdout.write('\n')
    # Never clobber an existing target; the user must move it first.
    if os.path.exists(target_file):
        sys.stderr.write('Target file already exists. Move it away and call the script again.\n')
        return 1
    with open(source_file, 'rb') as src:
        template = src.read()
    rendered = template % replacements
    if dry_run:
        sys.stdout.write('The contents of the target file are:\n')
        sys.stdout.write('-' * 78 + '\n')
        sys.stdout.write(rendered + '\n')
        sys.stdout.write('-' * 78 + '\n')
    else:
        with open(target_file, 'wb') as dst:
            dst.write(rendered)
    return 0
def _pathToIdentifier(relative_path):
result = relative_path.replace('/', '_')
result = result.replace('\\', '_')
result = result.replace('-', '_')
result = result.replace('.', '_')
result = result.replace(' ', '_')
return result
def buildReplacements(type_, name, location, target_file, options):
    """Return the replacement dict used to expand a template.

    Keys correspond to the %(...)s placeholders documented in the module
    docstring (AUTHOR, NAME, TITLE, YEAR, DATE, TIME, HEADER_GUARD,
    CMAKE_PROJECT_NAME, and for repositories REPOSITORY_PSEUDO_TARGET_NAME).
    """
    replacements = {
        'AUTHOR': options.author,
        'YEAR': datetime.date.today().year,
        'TIME': datetime.datetime.now().strftime('%H:%M'),
        'DATE': datetime.date.today().strftime('%Y-%m-%d'),
        'NAME': name,
        'TITLE': name.center(HEADER_CENTER_WIDTH).rstrip(),
    }
    # Header guard is the UPPER_CASE repository-relative path plus '_'.
    rel_file = os.path.relpath(target_file, paths.repositoryRoot())
    replacements['HEADER_GUARD'] = _pathToIdentifier(rel_file).upper() + '_'
    # CMake project name is the lower-case repository-relative directory.
    rel_dir = os.path.relpath(os.path.dirname(target_file),
                              paths.repositoryRoot())
    replacements['CMAKE_PROJECT_NAME'] = _pathToIdentifier(rel_dir)
    if type_ == 'repository':
        replacements['REPOSITORY_PSEUDO_TARGET_NAME'] = string.capwords(name.replace('/', ' ')).replace(' ', '')
    return replacements
def _checkTargetPaths(target_path):
    """Check that the path does not exist but its parent does."""
    # The target itself must not exist yet.
    if os.path.exists(target_path):
        sys.stderr.write('The path %s already exists. Move it and call this script again.\n' % target_path)
        return False
    # The parent must exist, since we only create one level here.
    parent = os.path.dirname(target_path)
    if not os.path.exists(parent):
        sys.stderr.write('The parent of the target path does not exist yet: %s\n' % parent)
        sys.stderr.write('Please create it and call this script again.\n')
        return False
    return True
def createModule(name, location, options):
    """Create a library module skeleton in the sandbox at ``location``.

    Generates include/seqan/<name>.h, include/seqan/<name>/<name>_base.h
    and the module INFO file, as selected by the options.create_* flags.
    Returns 0 on success, non-zero on any failure.
    """
    include_path = paths.pathToInclude(location)
    seqan_path = os.path.join(include_path, 'seqan')
    module_path = os.path.join(seqan_path, name)
    header_path = os.path.join(seqan_path, '%s.h' % name)
    print 'Creating module in %s' % module_path
    # Refuse to overwrite existing paths before touching the file system.
    if options.create_dirs and not _checkTargetPaths(module_path):
        return 1
    if options.create_dirs and not _checkTargetPaths(header_path):
        return 1
    print ' Module path is: %s' % module_path
    print ' Module header path is: %s' % header_path
    print ''
    if options.create_dirs:
        # Create directory.
        createDirectory(module_path, options.dry_run)
    if options.create_programs:
        # Copy over module header.
        source_file = paths.pathToTemplate('module_template', 'module.h')
        target_file = header_path
        replacements = buildReplacements('module', name, seqan_path, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
        # Copy over header inside module.
        source_file = paths.pathToTemplate('module_template', 'header.h')
        target_file = os.path.join(module_path, '%s_base.h' % name)
        replacements = buildReplacements('module', name, seqan_path, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    if options.create_infos:
        # Copy over INFO file for app and perform replacements.
        source_file = paths.pathToTemplate('module_template', 'INFO')
        target_file = os.path.join(module_path, 'INFO')
        replacements = buildReplacements('app', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    return 0
def createTest(name, location, options):
    """Create the test skeleton for module ``name`` in the sandbox.

    Generates test_<name>.cpp, test_<name>.h and a CMakeLists.txt, as
    selected by the options.create_* flags.  Returns 0 on success,
    non-zero on any failure.
    """
    target_path = paths.pathToTest(location, name)
    print 'Creating test in %s' % target_path
    # Refuse to overwrite an existing test directory.
    if options.create_dirs and not _checkTargetPaths(target_path):
        return 1
    print ' Target path is: %s' % target_path
    print ''
    if options.create_dirs:
        # Create directory.
        createDirectory(target_path, options.dry_run)
    if options.create_programs:
        # Copy over .cpp file for test and perform replacements.
        source_file = paths.pathToTemplate('test_template', 'test.cpp')
        target_file = os.path.join(target_path, 'test_%s.cpp' % name)
        replacements = buildReplacements('test', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
        # Copy over .h file for test and perform replacements.
        source_file = paths.pathToTemplate('test_template', 'test.h')
        target_file = os.path.join(target_path, 'test_%s.h' % name)
        replacements = buildReplacements('test', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    if options.create_cmakelists:
        # Copy over CMakeLists.txt file for test and perform replacements.
        source_file = paths.pathToTemplate('test_template', 'CMakeLists.txt')
        target_file = os.path.join(target_path, 'CMakeLists.txt')
        replacements = buildReplacements('test', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    return 0
def createApp(name, location, options):
    """Create an application skeleton named ``name`` in the sandbox.

    Generates <name>.cpp, <name>.h, an INFO file and a CMakeLists.txt, as
    selected by the options.create_* flags.  Returns 0 on success,
    non-zero on any failure.
    """
    target_path = paths.pathToApp(location, name)
    print 'Creating app in %s' % target_path
    if options.create_dirs and not _checkTargetPaths(target_path):
        return 1
    print ' Target path is: %s' % target_path
    print ''
    # NOTE(review): unlike createModule/createTest, the directory here is
    # created under create_programs rather than create_dirs -- confirm
    # whether this asymmetry is intentional.
    if options.create_programs:
        # Create directory.
        createDirectory(target_path, options.dry_run)
        # Copy over .cpp file for app and perform replacements.
        source_file = paths.pathToTemplate('app_template', 'app.cpp')
        target_file = os.path.join(target_path, '%s.cpp' % name)
        replacements = buildReplacements('app', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
        # Copy over .h file for app and perform replacements.
        source_file = paths.pathToTemplate('app_template', 'app.h')
        target_file = os.path.join(target_path, '%s.h' % name)
        replacements = buildReplacements('app', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    if options.create_infos:
        # Copy over INFO file for app and perform replacements.
        source_file = paths.pathToTemplate('app_template', 'INFO')
        target_file = os.path.join(target_path, 'INFO')
        replacements = buildReplacements('app', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    if options.create_cmakelists:
        # Copy over CMakeLists.txt file for app and perform replacements.
        source_file = paths.pathToTemplate('app_template', 'CMakeLists.txt')
        target_file = os.path.join(target_path, 'CMakeLists.txt')
        replacements = buildReplacements('app', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    return 0
def createDemo(name, location, options):
    """Create a demo program from the demo template.

    Returns 0 on success, non-zero on any failure.
    """
    target_path = paths.pathToDemo(location, name)
    print 'Creating demo in %s' % target_path
    if options.create_dirs and not _checkTargetPaths(target_path):
        return 1
    print ' Target path is: %s' % target_path
    print ''
    if options.create_programs:
        # Copy over .cpp file for app and perform replacements.
        source_file = paths.pathToTemplate('demo_template', 'demo.cpp')
        # NOTE(review): single-argument os.path.join is a no-op -- this
        # assumes pathToDemo() already returns the full .cpp file path;
        # confirm against the paths module.
        target_file = os.path.join(target_path)
        replacements = buildReplacements('demo', name, location, target_file, options)
        res = configureFile(target_file, source_file, replacements, options.dry_run)
        if res: return res
    return 0
def createHeader(name, location, options):
target_path = paths.pathToHeader(location, name)
print 'Creating (non-library) header in %s' % target_path
if not _checkTargetPaths(target_path):
return 1
print ' Target path is: %s' % target_path
print ''
# Copy over .h file for app and perform replacements.
source_file = paths.pathToTemplate('header_template', 'header.h')
target_file = os.path.join(target_path)
replacements = buildReplacements('header', name, location, target_file, options)
res = configureFile(target_file, source_file, replacements, options.dry_run)
if res: return res
print 'NOTE: Do not forget to add the header to the CMakeLists.txt file!'
return 0
def createLibraryHeader(name, location, options):
target_path = paths.pathToHeader(location, name)
print 'Creating library header in %s' % target_path
if not _checkTargetPaths(target_path):
return 1
print ' Target path is: %s' % target_path
print ''
# Copy over .h file for app and perform replacements.
source_file = paths.pathToTemplate('header_template', 'library_header.h')
target_file = os.path.join(target_path)
replacements = buildReplacements('library_header', name, location, target_file, options)
res = configureFile(target_file, source_file, replacements, options.dry_run)
if res: return res
return 0
def createRepository(location, options):
print 'Creating module %s' % location
target_path = paths.pathToRepository(location)
if options.create_dirs and not _checkTargetPaths(target_path):
return 1
print ' Target path is: %s' % target_path
print ''
if options.create_dirs:
# Create directories.
createDirectory(target_path, options.dry_run)
createDirectory(os.path.join(target_path, 'apps'), options.dry_run)
createDirectory(os.path.join(target_path, 'demos'), options.dry_run)
createDirectory(os.path.join(target_path, 'include'), options.dry_run)
createDirectory(os.path.join(target_path, 'include', 'seqan'), options.dry_run)
createDirectory(os.path.join(target_path, 'tests'), options.dry_run)
if options.create_cmakelists:
# Copy over file ${REPOSITORY}/CMakeLists.txt.
target_file = os.path.join(target_path, 'CMakeLists.txt')
source_file = paths.pathToTemplate('repository_template', 'CMakeLists.txt')
replacements = buildReplacements('repository', location, target_path, target_file, options)
configureFile(target_file, source_file, replacements, options.dry_run)
# Copy over file ${REPOSITORY}/apps/CMakeLists.txt.
target_file = os.path.join(target_path, 'apps', 'CMakeLists.txt')
source_file = paths.pathToTemplate('repository_template', 'apps_CMakeLists.txt')
replacements = buildReplacements('repository', location, target_path, target_file, options)
configureFile(target_file, source_file, replacements, options.dry_run)
# Copy over file ${REPOSITORY}/tests/CMakeLists.txt.
target_file = os.path.join(target_path, 'tests', 'CMakeLists.txt')
source_file = paths.pathToTemplate('repository_template', 'tests_CMakeLists.txt')
replacements = buildReplacements('repository', location, target_path, target_file, options)
configureFile(target_file, source_file, replacements, options.dry_run)
# Copy over file ${REPOSITORY}/demos/CMakeLists.txt.
target_file = os.path.join(target_path, 'demos', 'CMakeLists.txt')
source_file = paths.pathToTemplate('repository_template', 'demos_CMakeLists.txt')
replacements = buildReplacements('repository', location, target_path, target_file, options)
configureFile(target_file, source_file, replacements, options.dry_run)
return 0
def main():
    """Command line entry point.

    Parses the command line and dispatches to the matching create*
    function.  Returns the process exit code (0 on success).
    """
    # Parse arguments.
    parser = optparse.OptionParser(usage=USAGE, description=DESCRIPTION)
    parser.add_option('-s', '--skel-root', dest='skel_root',
                      help=('Set path to the directory where the skeletons '
                            'live in. Taken from environment variable '
                            'SEQAN_SKELS if available.'),
                      default=os.environ.get('SEQAN_SKELS',
                                             paths.pathToSkeletons()))
    parser.add_option('-a', '--author', dest='author',
                      help=('Set author to use. Should have the format USER '
                            '<EMAIL>. Taken from environment variable '
                            'SEQAN_AUTHOR if it exists.'),
                      default=os.environ.get('SEQAN_AUTHOR', DEFAULT_AUTHOR))
    parser.add_option('-d', '--dry-run', dest='dry_run', action='store_true',
                      help='Do not change anything, just simulate.',
                      default=False)
    parser.add_option('-c', '--cmakelists-only', dest='cmakelists_only',
                      action='store_true',
                      help='Only create CMakeLists.txt files',
                      default=False)
    parser.add_option('-i', '--infos-only', dest='infos_only',
                      action='store_true',
                      help='Only create INFO files',
                      default=False)
    options, args = parser.parse_args()
    if options.infos_only and options.cmakelists_only:
        # Fixed message: the option is actually spelled --infos-only.
        print >>sys.stderr, 'Only one of --infos-only and --cmakelists-only can be given.'
        return 1
    # By default everything is generated; the *-only flags narrow this.
    options.create_cmakelists = True
    options.create_infos = True
    options.create_dirs = True
    options.create_programs = True
    if options.infos_only or options.cmakelists_only:
        options.create_cmakelists = not options.infos_only
        options.create_infos = not options.cmakelists_only
        options.create_dirs = False
        options.create_programs = False
    if not args:
        parser.print_help(file=sys.stderr)
        return 1
    if len(args) < 2:
        print >>sys.stderr, 'Invalid argument count!'
        return 1
    if args[0] not in ['module', 'test', 'app', 'demo', 'repository',
                       'header', 'lheader']:
        print >>sys.stderr, 'Invalid template "%s".' % args[0]
        return 1
    # 'repository' takes one positional argument, all others take two.
    if args[0] == 'repository':
        if len(args) != 2:
            print >>sys.stderr, 'Invalid argument count!'
            return 1
        return createRepository(args[1], options)
    elif len(args) != 3:
        print >>sys.stderr, 'Invalid argument count!'
        return 1
    # Dispatch table mapping template kind to its creator function.
    create_methods = {
        'module' : createModule,
        'test': createTest,
        'app': createApp,
        'demo': createDemo,
        'header': createHeader,
        'lheader': createLibraryHeader,
    }
    return create_methods[args[0]](args[1], args[2], options)
# Script entry point: the process exit status is main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
| |
import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase, HachiAnalysisCase
from django.utils import unittest
# Python < 2.5 compatibility: fall back to Django's all() implementation
# when the builtin is missing.
try:
    all
except NameError:
    from django.utils.itercompat import all

__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner', 'run_tests')

# The module name for tests outside models.py
TEST_MODULE = 'tests'

# Shared doctest output checker used for every doctest suite built here.
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
    """Deprecated alias of unittest.TextTestRunner.

    Kept only for backwards compatibility; instantiation emits a
    PendingDeprecationWarning and otherwise behaves exactly like
    TextTestRunner.
    """

    def __init__(self, *args, **kwargs):
        import warnings
        message = ("DjangoTestRunner is deprecated; it's functionality is "
                   "indistinguishable from TextTestRunner")
        warnings.warn(message, PendingDeprecationWarning)
        super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
    """Return the app's parallel ``tests`` module, or None if it is absent.

    Distinguishes between "the tests module does not exist" (returns None)
    and "the tests module exists but itself fails to import" (re-raises
    the original ImportError so the real problem surfaces).
    """
    try:
        app_path = app_module.__name__.split('.')[:-1]
        test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE)
    except ImportError, e:
        # Couldn't import tests.py. Was it due to a missing file, or
        # due to an import error in a tests.py that actually exists?
        import os.path
        from imp import find_module
        try:
            mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)])
        except ImportError:
            # 'tests' module doesn't exist. Move on.
            test_module = None
        else:
            # The module exists, so there must be an import error in the
            # test module itself. We don't need the module; so if the
            # module was a single file module (i.e., tests.py), close the file
            # handle returned by find_module. Otherwise, the test module
            # is a directory, and there is nothing to close.
            if mod[0]:
                mod[0].close()
            raise
    return test_module
def build_suite(app_module):
    "Create a complete Django test suite for the provided application module"
    suite = unittest.TestSuite()
    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    if hasattr(app_module, 'suite'):
        suite.addTest(app_module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
        try:
            suite.addTest(doctest.DocTestSuite(app_module,
                                               checker=doctestOutputChecker,
                                               runner=DocTestRunner))
        except ValueError:
            # No doc tests in models.py
            pass
    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    test_module = get_tests(app_module)
    if test_module:
        # Load unit and doctests in the tests.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(test_module, 'suite'):
            suite.addTest(test_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
            try:
                suite.addTest(doctest.DocTestSuite(test_module,
                                                   checker=doctestOutputChecker,
                                                   runner=DocTestRunner))
            except ValueError:
                # No doc tests in tests.py
                pass
    return suite
def build_test(label):
    """Construct a test case with the specified label. Label should be of the
    form model.TestClass or model.TestClass.test_method. Returns an
    instantiated test or test suite corresponding to the label provided.

    Raises ValueError for malformed labels or labels that match nothing.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)
    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)
    # Couldn't find the test class in models.py; look in tests.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)
    # issubclass raises TypeError when TestClass is None or not a class;
    # the surrounding try treats that as "not a TestCase" and falls
    # through to the doctest lookup below.
    try:
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2: # label is app.TestClass
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(TestClass)
                except TypeError:
                    raise ValueError("Test label '%s' does not refer to a test class" % label)
            else: # label is app.TestClass.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass
    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            doctests = doctest.DocTestSuite(module,
                                            checker=doctestOutputChecker,
                                            runner=DocTestRunner)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass
    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)
    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are place in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
    """Return a new suite with tests grouped by type.

    All tests of type ``classes[0]`` come first, then ``classes[1]``,
    and so on; tests matching none of the classes are placed last.
    """
    buckets = [unittest.TestSuite() for _ in range(len(classes) + 1)]
    partition_suite(suite, classes, buckets)
    merged = buckets[0]
    for extra in buckets[1:]:
        merged.addTests(extra)
    return merged
def dependency_ordered(test_databases, dependencies):
    """Reorder ``test_databases`` so TEST_DEPENDENCIES are honored.

    ``test_databases`` is a list of (signature, (db_name, aliases)) pairs;
    ``dependencies`` maps an alias to the aliases it depends on (resolved
    entries are popped from it as a side effect).  Raises
    ImproperlyConfigured when the dependency graph contains a cycle.
    """
    ordered = []
    resolved = set()
    while test_databases:
        progress = False
        deferred = []
        while test_databases:
            signature, (db_name, aliases) = test_databases.pop()
            satisfied = True
            for alias in aliases:
                if alias not in dependencies:
                    resolved.add(alias)
                    continue
                if all(dep in resolved for dep in dependencies[alias]):
                    # Every prerequisite of this alias is already resolved.
                    dependencies.pop(alias)
                    resolved.add(alias)
                else:
                    satisfied = False
            if satisfied:
                ordered.append((signature, (db_name, aliases)))
                progress = True
            else:
                deferred.append((signature, (db_name, aliases)))
        # A full pass with no progress means the graph has a cycle.
        if not progress:
            raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
        test_databases = deferred
    return ordered
class DjangoTestSuiteRunner(object):
    """Orchestrates a full Django test run.

    Responsible for environment setup, suite construction, test-database
    creation (with mirror/dependency handling), execution and teardown.

    NOTE(review): ``failfast`` defaults to True here, while stock Django's
    runner defaults it to False -- confirm this deviation is intentional.
    """

    def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast

    def setup_test_environment(self, **kwargs):
        """Prepare the global test environment (DEBUG off, SIGINT handler)."""
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build a suite from ``test_labels``; with no labels, include every
        installed app.  TestCase tests are reordered to the front."""
        suite = unittest.TestSuite()
        if test_labels:
            for label in test_labels:
                # Dotted labels name a class/method; bare labels name an app.
                if '.' in label:
                    suite.addTest(build_test(label))
                else:
                    app = get_app(label)
                    suite.addTest(build_suite(app))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))
        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)
        return reorder_suite(suite, (TestCase,))

    def setup_databases(self, **kwargs):
        """Create the test databases.

        Aliases sharing a creation signature are deduplicated, TEST_MIRROR
        aliases are repointed rather than created, and TEST_DEPENDENCIES
        ordering is honored.  Returns (old_names, mirrors) for use by
        teardown_databases().
        """
        from django.db import connections, DEFAULT_DB_ALIAS
        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
            else:
                # Store a tuple with DB parameters that uniquely identify it.
                # If we have two aliases with the same values for that tuple,
                # we only need to create the test database once.
                item = test_databases.setdefault(
                    connection.creation.test_db_signature(),
                    (connection.settings_dict['NAME'], [])
                )
                item[1].append(alias)
                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
                else:
                    # Non-default databases implicitly depend on default.
                    if alias != DEFAULT_DB_ALIAS:
                        dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
            # Actually create the database for the first connection
            connection = connections[aliases[0]]
            old_names.append((connection, db_name, True))
            test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
            for alias in aliases[1:]:
                connection = connections[alias]
                if db_name:
                    old_names.append((connection, db_name, False))
                    connection.settings_dict['NAME'] = test_db_name
                else:
                    # If settings_dict['NAME'] isn't defined, we have a backend where
                    # the name isn't important -- e.g., SQLite, which uses :memory:.
                    # Force create the database instead of assuming it's a duplicate.
                    old_names.append((connection, db_name, True))
                    connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
        for alias, mirror_alias in mirrored_aliases.items():
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']
        return old_names, mirrors

    def run_suite(self, suite, **kwargs):
        """Execute the suite with a text runner at our verbosity/failfast."""
        return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)

    def teardown_databases(self, old_config, **kwargs):
        """Destroy databases created by setup_databases() and restore the
        original NAME settings (including mirror aliases)."""
        from django.db import connections
        old_names, mirrors = old_config
        # Point all the mirrors back to the originals
        for alias, old_name in mirrors:
            connections[alias].settings_dict['NAME'] = old_name
        # Destroy all the non-mirror databases
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)
            else:
                connection.settings_dict['NAME'] = old_name

    def teardown_test_environment(self, **kwargs):
        """Undo setup_test_environment()."""
        unittest.removeHandler()
        teardown_test_environment()

    def suite_result(self, suite, result, **kwargs):
        """Reduce a TestResult to the exit value: failures plus errors."""
        return len(result.failures) + len(result.errors)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Labels must be of the form:
        - app.TestClass.test_method
        Run a single specific test method
        - app.TestClass
        Run all the test methods in a given class
        - app
        Search for doctests and unittests in the named application.
        When looking for tests, the test runner will look in the models and
        tests modules for the application.
        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.
        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)
class HachiSuiteRunner(DjangoTestSuiteRunner):
    """Suite runner that keeps only HachiAnalysisCase tests."""

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build the normal suite, then filter it down to HachiAnalysisCase
        instances.

        NOTE(review): filtering iterates ``f._tests`` directly, so tests
        inside nested sub-suites are dropped rather than inspected --
        confirm the parent suite is always flat here.
        """
        f = super(HachiSuiteRunner, self).build_suite(test_labels, extra_tests, **kwargs)
        return unittest.TestSuite([case for case in f._tests if isinstance(case, HachiAnalysisCase)])
def run_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
    """Deprecated functional entry point; use DjangoTestSuiteRunner instead.

    Emits a DeprecationWarning, then delegates to a freshly constructed
    DjangoTestSuiteRunner and returns its failure count.
    """
    import warnings
    warnings.warn(
        'The run_tests() test runner has been deprecated in favor of DjangoTestSuiteRunner.',
        DeprecationWarning
    )
    runner = DjangoTestSuiteRunner(verbosity=verbosity,
                                   interactive=interactive,
                                   failfast=failfast)
    return runner.run_tests(test_labels, extra_tests=extra_tests)
| |
import os
# Put the locally built caffe tools first on PATH so shell-outs find them.
os.environ['PATH'] = '../caffe/build/tools:'+os.environ['PATH']
import sys
# Prefer the local caffe python bindings over any installed copy.
sys.path = ['../caffe/python'] + sys.path
import cv2
import cv
import numpy as np
import shutil
import random
import leveldb
import caffe
from google import protobuf
from caffe.proto import caffe_pb2
from xml.dom import minidom
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
import cPickle
import time
def vis_square(fname, data, padsize=1, padval=0):
    """Save a square tiling of the images/filters in ``data`` to ``fname``.

    data -- array with leading axis = number of tiles; normalized in place
    to [0, 1] (this mutates the caller's array).
    NOTE(review): division by data.max() fails for constant input -- confirm
    callers never pass such data.
    """
    data -= data.min()
    data /= data.max()
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    # Pad the tile count up to n*n, and append padsize pixels of padval to
    # each tile's height/width so tiles are visually separated.
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    # Average over the last axis before plotting.  NOTE(review): for 3-D
    # input (no channel axis) this collapses image columns, not channels --
    # looks intended only for 4-D (channelled) input; confirm.
    data = data.mean(axis = -1)
    plt.imshow(data)
    plt.savefig(fname)
def vis_cluster(dist, patch_dims, ntop, img):
    """Write 'viz_cluster.jpg': one row per cluster, top ``ntop`` patches.

    dist -- (num_patches, num_clusters) score matrix; patch_dims -- (height,
    width) of one patch; img -- array of patch images indexed by patch id.
    """
    # Bucket j collects (patch_index, score) pairs for cluster j.
    cluster = [ [] for i in xrange(dist.shape[1]) ]
    for i in xrange(dist.shape[0]):
        for j in xrange(dist.shape[1]):
            cluster[j].append((i, dist[i,j]))
    # NOTE(review): every patch is appended to every cluster above, so all
    # buckets have equal length and this size sort is a no-op -- the append
    # may have been meant to be conditional on the best cluster; confirm.
    cluster.sort(key = lambda x: len(x), reverse = True)
    for i in cluster:
        print len(i)
        # Best-scoring patches first within each cluster row.
        i.sort(key = lambda x: x[1], reverse=True)
    viz = np.zeros((patch_dims[0]*len(cluster), patch_dims[1]*ntop, img.shape[-1]))
    # Paste up to ntop patches into row i of the mosaic.
    for i in xrange(len(cluster)):
        for j in xrange(min(ntop, len(cluster[i]))):
            viz[i*patch_dims[0]:(i+1)*patch_dims[0], j*patch_dims[1]:(j+1)*patch_dims[1], :] = img[cluster[i][j][0]]
    cv2.imwrite('viz_cluster.jpg', viz)
def cluster_acc(Y_pred, Y):
    """Compute unsupervised clustering accuracy for predicted labels.

    Finds the best one-to-one mapping between predicted cluster ids and
    ground-truth labels (maximum-weight bipartite matching) and returns
    (accuracy, confusion_matrix).

    Y_pred, Y -- 1-D integer arrays of equal size.
    """
    # sklearn.utils.linear_assignment_ was removed from scikit-learn
    # (0.23); scipy's linear_sum_assignment is the drop-in replacement
    # and yields the same optimal matching.
    from scipy.optimize import linear_sum_assignment
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(Y_pred.size):
        w[Y_pred[i], Y[i]] += 1
    # Minimizing (w.max() - w) maximizes the matched co-occurrence counts.
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / Y_pred.size, w
def vis_gradient(X, tmm, img):
    """Plot gradient magnitude vs. soft assignment q for one cluster.

    NOTE(review): the X/tmm/img arguments are immediately overwritten by
    the contents of 'tmp.pkl', so the parameters are effectively unused --
    confirm whether loading from the pickle is intentional.
    """
    from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
    with open('tmp.pkl') as fin:
        X, tmm, img = cPickle.load(fin)
    # Replicate patches along the last axis (grayscale -> 3 channels).
    img = np.tile(img, 3)
    l = []
    q = tmm.transform(X)
    # Pick the least-populated cluster by hard-assignment counts, then
    # keep only the samples assigned to it.
    ind = np.bincount(q.argmax(axis=1)).argmin()
    l = [ i for i in xrange(X.shape[0]) if q[i].argmax() == ind ]
    X = X[l,:]
    img = img[l]
    q = tmm.transform(X)
    # Row-normalize q, then square and renormalize into p (presumably the
    # DEC-style sharpened target distribution -- verify).
    q = (q.T/q.sum(axis=1)).T
    p = (q**2)
    p = (p.T/p.sum(axis=1)).T
    grad = 2.0/(1.0+cdist(X, tmm.cluster_centers_, 'sqeuclidean'))*(p-q)*cdist(X, tmm.cluster_centers_, 'cityblock')
    fig, ax = plt.subplots()
    ax.scatter(q[:,ind], grad[:,ind], marker=u'+')
    n_disp = 10
    # Annotate the scatter with n_disp example patches spread across the
    # q range, highest q first.
    arg = np.argsort(q[:,ind])
    for i in xrange(n_disp):
        j = arg[int(X.shape[0]*(1.0-1.0*i/n_disp))-1]
        imgbox = OffsetImage(img[j], zoom=1.8)
        ab = AnnotationBbox(imgbox, (q[j,ind], grad[j,ind]),
                            xybox=(0.95-1.0*i/n_disp, 1.06 ),
                            xycoords='data',
                            boxcoords=("axes fraction", "axes fraction"),
                            pad=0.0,
                            arrowprops=dict(arrowstyle="->"))
        ax.add_artist(ab)
    plt.xlabel(r'$q_{ij}$', fontsize=24)
    plt.ylabel(r'$|\frac{\partial L}{\partial z_i}|$', fontsize=24)
    plt.draw()
    plt.show()
def dispImg(X, n, fname=None):
    """Show or save an n-by-n mosaic of the first n*n images in X.

    X -- array of shape (count, h, w, c) with count >= n*n.
    fname -- if None, display interactively via cv2; otherwise write the
    mosaic to this path.
    """
    h = X.shape[1]
    w = X.shape[2]
    c = X.shape[3]
    # Bug fix: the mosaic must have n*h rows (was n*w), which broke the
    # tiling for non-square patches.
    buff = np.zeros((n*h, n*w, c), dtype=np.uint8)
    for i in xrange(n):
        for j in xrange(n):
            buff[i*h:(i+1)*h, j*w:(j+1)*w, :] = X[i*n+j]
    if fname is None:
        cv2.imshow('a', buff)
        cv2.waitKey(0)
    else:
        cv2.imwrite(fname, buff)
def make_net(fnet, layers):
    """Write a legacy-format (V1 'layers') Caffe prototxt assembled from templates.

    Args:
        fnet: open writable file object; it is closed before returning.
        layers: sequence of (template_key, format_args) pairs. Each key selects
            one of the str.format templates below; literal prototxt braces are
            escaped as '{{' / '}}'.
    """
    layer_dict = {}
    # Paired TRAIN/TEST DATA layers reading LevelDB.
    # args: (top_name, unused, train_source, test_source, scale)
    layer_dict['data'] = """layers {{
name: "{0}"
type: DATA
top: "{0}"
data_param {{
source: "{2}"
backend: LEVELDB
batch_size: 256
}}
transform_param {{
scale: {4}
}}
include: {{ phase: TRAIN }}
}}
layers {{
name: "{0}"
type: DATA
top: "{0}"
data_param {{
source: "{3}"
backend: LEVELDB
batch_size: 100
}}
transform_param {{
scale: {4}
}}
include: {{ phase: TEST }}
}}
"""
    # Same as 'data' but with a seek offset to resume mid-epoch.
    # args: (top_name, unused, train_source, test_source, scale, seek)
    layer_dict['data_seek'] = """layers {{
name: "{0}"
type: DATA
top: "{0}"
data_param {{
seek: {5}
source: "{2}"
backend: LEVELDB
batch_size: 256
}}
transform_param {{
scale: {4}
}}
include: {{ phase: TRAIN }}
}}
layers {{
name: "{0}"
type: DATA
top: "{0}"
data_param {{
seek: {5}
source: "{3}"
backend: LEVELDB
batch_size: 100
}}
transform_param {{
scale: {4}
}}
include: {{ phase: TEST }}
}}
"""
    # Silence layer to discard an unused blob. args: (bottom_name,)
    layer_dict['sil'] = """layers {{
name: "{0}silence"
type: SILENCE
bottom: "{0}"
}}
"""
    # Multi-t-distribution clustering loss (custom layer) plus a silence
    # layer for its auxiliary outputs. args: (name, bottom, label, num_center)
    layer_dict['tloss'] = """layers {{
name: "{0}"
type: MULTI_T_LOSS
bottom: "{1}"
bottom: "{2}"
blobs_lr: 1.
blobs_lr: 0.
blobs_lr: 0.
top: "loss"
top: "std"
top: "ind"
top: "proba"
multi_t_loss_param {{
num_center: {3}
alpha: 1
lambda: 2
beta: 1
bandwidth: 0.1
weight_filler {{
type: 'gaussian'
std: 0.5
}}
}}
}}
layers {{
name: "silence"
type: SILENCE
bottom: "label"
bottom: "ind"
bottom: "proba"
}}
"""
    # Fully connected layer with fixed gaussian init (std 0.05).
    # args: (name, bottom, num_output)
    layer_dict['inner'] = """layers {{
name: "{0}"
type: INNER_PRODUCT
bottom: "{1}"
top: "{0}"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {{
num_output: {2}
weight_filler {{
type: "gaussian"
std: 0.05
}}
bias_filler {{
type: "constant"
value: 0
}}
}}
}}
"""
    # Fully connected layer with configurable init std.
    # args: (name, bottom, num_output, std)
    layer_dict['inner_init'] = """layers {{
name: "{0}"
type: INNER_PRODUCT
bottom: "{1}"
top: "{0}"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {{
num_output: {2}
weight_filler {{
type: "gaussian"
std: {3}
}}
bias_filler {{
type: "constant"
value: 0
}}
}}
}}
"""
    # Fully connected layer with configurable init std and LR multipliers.
    # args: (name, bottom, num_output, std, weight_lr, bias_lr)
    layer_dict['inner_lr'] = """layers {{
name: "{0}"
type: INNER_PRODUCT
bottom: "{1}"
top: "{0}"
blobs_lr: {4}
blobs_lr: {5}
weight_decay: 1
weight_decay: 0
inner_product_param {{
num_output: {2}
weight_filler {{
type: "gaussian"
std: {3}
}}
bias_filler {{
type: "constant"
value: 0
}}
}}
}}
"""
    # In-place ReLU. args: (name,)
    layer_dict['relu'] = """layers {{
name: "{0}relu"
type: RELU
bottom: "{0}"
top: "{0}"
}}
"""
    # In-place dropout. args: (name, ratio)
    layer_dict['drop'] = """layers {{
name: "{0}drop"
type: DROPOUT
bottom: "{0}"
top: "{0}"
dropout_param {{
dropout_ratio: {1}
}}
}}
"""
    # Dropout producing a separate top blob. args: (top, bottom, ratio)
    layer_dict['drop_copy'] = """layers {{
name: "{0}drop"
type: DROPOUT
bottom: "{1}"
top: "{0}"
dropout_param {{
dropout_ratio: {2}
}}
}}
"""
    # Euclidean (L2) reconstruction loss. args: (name, bottom1, bottom2)
    layer_dict['euclid'] = """layers {{
name: "{0}"
type: EUCLIDEAN_LOSS
bottom: "{1}"
bottom: "{2}"
top: "{0}"
}}
"""
    fnet.write('name: "net"\n')
    for k,v in layers:
        fnet.write(layer_dict[k].format(*v))
    fnet.close()
class TMM(object):
    """Student's-t mixture-style soft assignment model.

    Cluster centers are initialized with k-means; ``transform`` converts
    distances to the centers into row-normalized soft assignments using a
    Student's t kernel with ``alpha`` degrees of freedom.
    """

    def __init__(self, n_components=1, alpha=1):
        self.n_components = n_components
        self.tol = 1e-5
        self.alpha = float(alpha)

    def fit(self, X):
        """Initialize cluster centers with k-means (20 restarts) and unit covariances."""
        from sklearn.cluster import KMeans
        km = KMeans(self.n_components, n_init=20)
        km.fit(X)
        self.cluster_centers_ = km.cluster_centers_
        self.covars_ = np.ones(self.cluster_centers_.shape)

    def transform(self, X):
        """Return row-normalized soft assignments of each sample to the centers."""
        free_dim = 1.0
        dists = cdist(X, self.cluster_centers_)
        kernel = (1.0 + dists ** 2 / self.alpha) ** (-(self.alpha + free_dim) / 2.0)
        return kernel / kernel.sum(axis=1)[:, None]

    def predict(self, X):
        """Return the index of the most likely center for each sample."""
        return self.transform(X).argmax(axis=1)
def load_mnist(root, training):
    """Load raw MNIST images and labels from idx-format files under ``root``.

    Args:
        root: directory prefix (including trailing separator) of the files.
        training: truthy to load the 60k training split, else the 10k test split.

    Returns:
        (X, Y): X uint8 of shape (N, 784), Y uint8 of shape (N,).
    """
    if training:
        images_file = 'train-images-idx3-ubyte'
        labels_file = 'train-labels-idx1-ubyte'
        count = 60000
    else:
        images_file = 't10k-images-idx3-ubyte'
        labels_file = 't10k-labels-idx1-ubyte'
        count = 10000
    with open(root + images_file, 'rb') as fin:
        fin.seek(16, os.SEEK_SET)  # skip the 16-byte idx3 header
        X = np.fromfile(fin, dtype=np.uint8).reshape((count, 28 * 28))
    with open(root + labels_file, 'rb') as fin:
        fin.seek(8, os.SEEK_SET)  # skip the 8-byte idx1 header
        Y = np.fromfile(fin, dtype=np.uint8)
    return X, Y
def make_mnist_data():
    """Build MNIST LevelDB datasets: train, test, and combined 'total' splits.

    Pixels are scaled by 0.02 to the range the network expects. The train
    split is read back and checked to verify a faithful DB round trip.
    """
    X_train, Y_train = load_mnist('../mnist/', True)
    X_train = X_train.astype(np.float64) * 0.02
    write_db(X_train, Y_train, 'mnist_train')
    # Round-trip check: values and labels must survive the DB encoding.
    X_check, Y_check = read_db('mnist_train', True)
    assert np.abs(X_train - X_check).mean() < 1e-5
    assert (Y_train != Y_check).sum() == 0
    X_test, Y_test = load_mnist('../mnist/', False)
    X_test = X_test.astype(np.float64) * 0.02
    write_db(X_test, Y_test, 'mnist_test')
    write_db(np.concatenate((X_train, X_test), axis=0),
             np.concatenate((Y_train, Y_test), axis=0),
             'mnist_total')
def make_reuters_data():
    """Build TF-IDF LevelDB datasets from RCV1-v2 for the 4 top categories.

    Reads category assignments and tokenized documents from '../reuters/',
    keeps single-label documents only, vectorizes to 2000 TF-IDF features,
    and writes shuffled full and 10k-subset splits. Python 2 only
    (dict.has_key, integer division, locals() probing).
    """
    np.random.seed(1234)
    random.seed(1234)
    from sklearn.feature_extraction.text import CountVectorizer
    did_to_cat = {}
    cat_list = ['CCAT', 'GCAT', 'MCAT', 'ECAT']
    with open('../reuters/rcv1-v2.topics.qrels') as fin:
        for line in fin.readlines():
            line = line.strip().split(' ')
            cat = line[0]
            did = int(line[1])
            if cat in cat_list:
                did_to_cat[did] = did_to_cat.get(did, []) + [cat]
    # Keep only single-label documents. NOTE(review): deleting while
    # iterating .keys() is safe only on Python 2, where keys() is a list.
    for did in did_to_cat.keys():
        if len(did_to_cat[did]) > 1:
            del did_to_cat[did]
    dat_list = ['lyrl2004_tokens_test_pt0.dat',
                'lyrl2004_tokens_test_pt1.dat',
                'lyrl2004_tokens_test_pt2.dat',
                'lyrl2004_tokens_test_pt3.dat',
                'lyrl2004_tokens_train.dat']
    data = []
    target = []
    cat_to_cid = {'CCAT':0, 'GCAT':1, 'MCAT':2, 'ECAT':3}
    # 'did' leaked from the loop above; remove it so the locals() probe
    # below only fires once a document has actually been started.
    del did
    for dat in dat_list:
        with open('../reuters/'+dat) as fin:
            for line in fin.readlines():
                if line.startswith('.I'):
                    # A new document starts: flush the previous one, if any.
                    if 'did' in locals():
                        assert doc != ''
                        if did_to_cat.has_key(did):
                            data.append(doc)
                            target.append(cat_to_cid[did_to_cat[did][0]])
                    did = int(line.strip().split(' ')[1])
                    doc = ''
                elif line.startswith('.W'):
                    assert doc == ''
                else:
                    doc += line
    # NOTE(review): the final document of the last file is never flushed;
    # this assert passing presumably relies on that -- confirm.
    assert len(data) == len(did_to_cat)
    X = CountVectorizer(dtype=np.float64, max_features=2000).fit_transform(data)
    Y = np.asarray(target)
    from sklearn.feature_extraction.text import TfidfTransformer
    X = TfidfTransformer(norm='l2', sublinear_tf=True).fit_transform(X)
    # Scale so per-feature magnitudes are O(1) after the l2 row norm.
    X = np.asarray(X.todense())*np.sqrt(X.shape[1])
    p = np.random.permutation(X.shape[0])
    X = X[p]
    Y = Y[p]
    N = X.shape[0]
    # NOTE(review): 'train' and 'total' both contain all N samples while
    # 'test' is the last fifth (Python 2 integer division) -- confirm intended.
    write_db(X[:N], Y[:N], 'reutersidf_train')
    write_db(X[N*4/5:N], Y[N*4/5:N], 'reutersidf_test')
    write_db(X[:N], Y[:N], 'reutersidf_total')
    np.save('reutersidf.npy', Y[:N])
    N = 10000
    write_db(X[:N], Y[:N], 'reutersidf10k_train')
    write_db(X[N*4/5:N], Y[N*4/5:N], 'reutersidf10k_test')
    write_db(X[:N], Y[:N], 'reutersidf10k_total')
def hog_picture(hog, resolution):
    """Render a HOG descriptor grid as an oriented-glyph image.

    Args:
        hog: array of shape (rows, cols, >=9); per-cell orientation weights.
        resolution: pixel side length of each rendered cell.

    Returns:
        float32 image of shape (rows*resolution, cols*resolution).
    """
    # NOTE(review): scipy.misc.imrotate was removed in SciPy >= 1.2; this
    # requires an old SciPy (with PIL) to run.
    from scipy.misc import imrotate
    # Base glyph: a vertical 2px bar; rotated copies cover 9 orientations
    # in -20 degree steps.
    glyph1 = np.zeros((resolution, resolution), dtype=np.uint8)
    glyph1[:, round(resolution / 2)-1:round(resolution / 2) + 1] = 255
    glyph = np.zeros((resolution, resolution, 9), dtype=np.uint8)
    glyph[:, :, 0] = glyph1
    for i in xrange(1, 9):
        glyph[:, :, i] = imrotate(glyph1, -i * 20)
    shape = hog.shape
    # Negative weights are clamped to zero before rendering.
    clamped_hog = hog.copy()
    clamped_hog[hog < 0] = 0
    image = np.zeros((resolution * shape[0], resolution * shape[1]), dtype=np.float32)
    # Each cell is the pixel-wise max over its 9 weighted orientation glyphs.
    for i in xrange(shape[0]):
        for j in xrange(shape[1]):
            for k in xrange(9):
                image[i*resolution:(i+1)*resolution, j*resolution:(j+1)*resolution] = np.maximum(image[i*resolution:(i+1)*resolution, j*resolution:(j+1)*resolution], clamped_hog[i, j, k] * glyph[:, :, k])
    return image
def load_stl(fname):
    """Build HOG + downsampled-color features for one STL-10 binary file.

    Args:
        fname: file name under '../stl/' containing raw uint8 image data.

    Returns:
        (feature, images): scaled HOG plus Cr/Cb color features, and the
        images in BGR channel order for OpenCV visualization.

    Python 2 only (integer division, print statement).
    """
    from joblib import Parallel, delayed
    import features
    X = np.fromfile('../stl/'+fname, dtype=np.uint8)
    # STL-10 stores column-major 3x96x96; transpose to (N, 96, 96, 3).
    X = X.reshape((X.size/3/96/96, 3, 96, 96)).transpose((0,3,2,1))
    # Save a mosaic of the first 100 originals for sanity checking.
    dispImg(X[:100, :, :, [2,1,0]], 10, fname+'_org.jpg')
    n_jobs = 10
    cmap_size = (8,8)
    N = X.shape[0]
    H = np.asarray(Parallel(n_jobs=n_jobs)( delayed(features.hog)(X[i]) for i in xrange(N) ))
    # Render HOG glyphs for the first 100 images (tiled to 3 channels).
    H_img = np.repeat(np.asarray([ hog_picture(H[i], 9) for i in xrange(100) ])[:, :,:,np.newaxis], 3, 3)
    dispImg(H_img, 10, fname+'_hog.jpg')
    H = H.reshape((H.shape[0], H.size/N))
    # Color feature: 8x8 thumbnails converted to YCrCb, keeping Cr/Cb only.
    X_small = np.asarray(Parallel(n_jobs=n_jobs)( delayed(cv2.resize)(X[i], cmap_size) for i in xrange(N) ))
    crcb = np.asarray(Parallel(n_jobs=n_jobs)( delayed(cv2.cvtColor)(X_small[i], cv.CV_RGB2YCrCb) for i in xrange(N) ))
    crcb = crcb[:,:,:,1:]
    crcb = crcb.reshape((crcb.shape[0], crcb.size/N))
    # Rescale both feature groups to comparable magnitudes.
    feature = np.concatenate(((H-0.2)*10.0, (crcb-128.0)/10.0), axis=1)
    print feature.shape
    return feature, X[:,:,:,[2,1,0]]
def make_stl_data():
    """Build STL-10 LevelDB datasets (labeled 'total' + mixed train/test).

    Python 2 only: the 4/5 split below relies on integer division.
    """
    np.random.seed(1234)
    random.seed(1234)
    X_train, img_train = load_stl('train_X.bin')
    X_test, img_test = load_stl('test_X.bin')
    X_unlabel, img_unlabel = load_stl('unlabeled_X.bin')
    # Labels on disk are 1-based; shift to 0-based.
    Y_train = np.fromfile('../stl/train_y.bin', dtype=np.uint8) - 1
    Y_test = np.fromfile('../stl/test_y.bin', dtype=np.uint8) - 1
    # Shuffle the labeled pool once, keeping features/images/labels aligned.
    X_total = np.concatenate((X_train, X_test), axis=0)
    img_total = np.concatenate((img_train, img_test), axis=0)
    Y_total = np.concatenate((Y_train, Y_test))
    p = np.random.permutation(X_total.shape[0])
    X_total = X_total[p]
    img_total = img_total[p]
    Y_total = Y_total[p]
    write_db(X_total, Y_total, 'stl_total')
    write_db(img_total, Y_total, 'stl_img')
    # Labeled + unlabeled pool with dummy zero labels; 80/20 train/test.
    X = np.concatenate((X_total, X_unlabel), axis=0)
    p = np.random.permutation(X.shape[0])
    X = X[p]
    Y = np.zeros((X.shape[0],))
    N = X.shape[0]*4/5
    write_db(X[:N], Y[:N], 'stl_train')
    write_db(X[N:], Y[N:], 'stl_test')
def read_db(str_db, float_data = True):
    """Read an entire LevelDB of Caffe Datum records into numpy arrays.

    Args:
        str_db: LevelDB directory path.
        float_data: if True read each Datum's float_data field, otherwise
            decode its raw byte data as uint8.

    Returns:
        (array, label): data matrix and label vector, in DB key order.
    """
    db = leveldb.LevelDB(str_db)
    datum = caffe_pb2.Datum()
    array = []
    label = []
    # RangeIter walks keys in sorted order; write_db zero-pads keys so this
    # matches insertion order.
    for k,v in db.RangeIter():
        dt = datum.FromString(v)
        if float_data:
            array.append(dt.float_data)
        else:
            array.append(np.fromstring(dt.data, dtype=np.uint8))
        label.append(dt.label)
    return np.asarray(array), np.asarray(label)
def write_db(X, Y, fname):
    """Write samples X with labels Y into a fresh LevelDB named ``fname``.

    Any existing DB directory at ``fname`` is removed first. X is flattened
    to (N, D, 1, 1) Datum blobs. Python 2 integer division assumed.
    """
    if os.path.exists(fname):
        shutil.rmtree(fname)
    assert X.shape[0] == Y.shape[0]
    X = X.reshape((X.shape[0], X.size/X.shape[0], 1, 1))
    db = leveldb.LevelDB(fname)
    for i in xrange(X.shape[0]):
        x = X[i]
        if x.ndim != 3:
            x = x.reshape((x.size,1,1))
        # Zero-padded keys keep RangeIter in insertion order on read-back.
        db.Put('{:08}'.format(i), caffe.io.array_to_datum(x, int(Y[i])).SerializeToString())
    # Dropping the handle flushes/closes the DB.
    del db
def update_db(seek, N, X, Y, fname):
    """Overwrite a window of an existing LevelDB, wrapping at N entries.

    Entry i of X/Y is written at key (i + seek) mod N, matching the data
    layer's seek-based epoch resumption. Python 2 integer division assumed.
    """
    assert X.shape[0] == Y.shape[0]
    X = X.reshape((X.shape[0], X.size/X.shape[0], 1, 1))
    db = leveldb.LevelDB(fname)
    for i in xrange(X.shape[0]):
        x = X[i]
        if x.ndim != 3:
            x = x.reshape((x.size,1,1))
        db.Put('{:08}'.format((i+seek)%N), caffe.io.array_to_datum(x, int(Y[i])).SerializeToString())
    # Dropping the handle flushes/closes the DB.
    del db
def extract_feature(net, model, blobs, N, train = False, device = None):
    """Run forward passes and collect the named blobs for N samples.

    Args:
        net: a caffe.Net instance, or a prototxt path (a net is then built,
            optionally loading weights from ``model``).
        model: caffemodel path, or falsy for random initialization.
        blobs: list of blob names to extract.
        N: number of samples to collect; outputs are trimmed to N rows.
        train: if True and a net is built, construct it in TRAIN phase.
        device: GPU id, or None to stay on CPU.

    Returns:
        (res, net): list of per-blob arrays (each with N rows) and the net.
    """
    if type(net) is str:
        if train:
            caffe.Net.set_phase_train()
        if model:
            net = caffe.Net(net, model)
        else:
            net = caffe.Net(net)
    # NOTE(review): phase is forced back to TEST even when train=True was
    # used for construction -- presumably only dropout/data behavior at
    # build time matters; confirm against the custom Caffe branch.
    caffe.Net.set_phase_test()
    if not (device is None):
        caffe.Net.set_mode_gpu()
        caffe.Net.set_device(device)
    batch_size = net.blobs[blobs[0]].num
    res = [ [] for i in blobs ]
    # Enough batches to cover N samples (Python 2 integer division).
    for i in xrange((N-1)/batch_size+1):
        ret = net.forward(blobs=blobs)
        for i in xrange(len(blobs)):
            res[i].append(ret[blobs[i]].copy())
    for i in xrange(len(blobs)):
        res[i] = np.concatenate(res[i], axis=0)[:N]
    return res, net
def write_net(db, dim, n_class, seek):
    """Write net.prototxt for the clustering network over dataset ``db``.

    Args:
        db: dataset name prefix; samples come from the '<db>_total' LevelDB.
        dim: output feature dimensionality.
        n_class: number of centers for the multi-t loss layer.
        seek: quoted LevelDB seek string used to resume mid-epoch.
    """
    data_source = db + '_total'
    net_layers = [
        ('data_seek', ('data', 'dummy', data_source, data_source, 1.0, seek)),
        ('data_seek', ('label', 'dummy', 'train_weight', 'train_weight', 1.0, seek)),
        ('inner', ('inner1', 'data', 500)),
        ('relu', ('inner1',)),
        ('inner', ('inner2', 'inner1', 500)),
        ('relu', ('inner2',)),
        ('inner', ('inner3', 'inner2', 2000)),
        ('relu', ('inner3',)),
        ('inner', ('output', 'inner3', dim)),
        ('tloss', ('loss', 'output', 'label', n_class)),
    ]
    with open('net.prototxt', 'w') as fnet:
        make_net(fnet, net_layers)
def DisKmeans(db, update_interval = None):
    """Run DEC-style discriminative clustering on dataset ``db``.

    Alternates between (1) extracting features with the current Caffe net,
    (2) soft-assigning samples to clusters with a t-distribution kernel,
    and (3) retraining the net against sharpened assignment weights, until
    fewer than 0.1% of samples change cluster between rounds.

    Args:
        db: dataset key ('mnist', 'stl', 'reuters', 'reutersidf',
            'reuters10k', 'reutersidf10k').
        update_interval: Caffe iterations per outer round; defaults to one
            epoch over the training data.

    Returns:
        (accuracy, NMI) of the final clustering against ground truth.

    Python 2 only (print statements, integer division); requires the
    custom Caffe branch with the MULTI_T_LOSS layer on PATH.
    """
    from sklearn.cluster import KMeans
    from sklearn.mixture import GMM
    from sklearn.lda import LDA
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import normalized_mutual_info_score
    from scipy.spatial.distance import cdist
    import cPickle
    from scipy.io import loadmat
    if db == 'mnist':
        N_class = 10
        batch_size = 100
        train_batch_size = 256
        X, Y = read_db(db+'_total', True)
        X = np.asarray(X, dtype=np.float64)
        Y = np.asarray(np.squeeze(Y), dtype = np.int32)
        N = X.shape[0]
        # Undo the 0.02 scaling applied at DB build time to recover images.
        img = np.clip((X/0.02), 0, 255).astype(np.uint8).reshape((N, 28, 28, 1))
    elif db == 'stl':
        N_class = 10
        batch_size = 100
        train_batch_size = 256
        img = read_db('stl_img', False)[0]
        img = img.reshape((img.shape[0], 96, 96, 3))
        X, Y = read_db(db+'_total', True)
        X = np.asarray(X, dtype=np.float64)
        Y = np.asarray(np.squeeze(Y), dtype = np.int32)
        N = X.shape[0]
    elif db == 'reuters':
        # NOTE(review): this branch and 'reutersidf' never define X or img,
        # yet X is used below -- these paths look broken/unused; confirm.
        N_class = 4
        batch_size = 100
        train_batch_size = 256
        Y = np.fromfile('reuters.npy', dtype=np.int64)
        N = Y.shape[0]
    elif db == 'reutersidf':
        N_class = 4
        batch_size = 100
        train_batch_size = 256
        Y = np.load('reutersidf.npy')
        N = Y.shape[0]
    elif db == 'reuters10k' or db == 'reutersidf10k':
        N_class = 4
        batch_size = 100
        train_batch_size = 256
        X, Y = read_db(db+'_total', True)
        X = np.asarray(X, dtype=np.float64)
        Y = np.asarray(np.squeeze(Y), dtype = np.int32)
        N = X.shape[0]
    tmm_alpha = 1.0
    # One epoch worth of iterations (Python 2 integer division).
    total_iters = (N-1)/train_batch_size+1
    if not update_interval:
        update_interval = total_iters
    Y_pred = np.zeros((Y.shape[0]))
    iters = 0
    seek = 0
    dim = 10
    acc_list = []
    while True:
        # Rebuild the prototxt from seek 0 for feature extraction.
        write_net(db, dim, N_class, "'{:08}'".format(0))
        if iters == 0:
            # Bootstrap: dummy weights, features from the pretrained
            # autoencoder snapshot, k-means initialization of centers.
            write_db(np.zeros((N,N_class)), np.zeros((N,)), 'train_weight')
            ret, net = extract_feature('net.prototxt', 'exp/'+db+'/save_iter_100000.caffemodel', ['output'], N, True, 0)
            feature = ret[0].squeeze()
            gmm_model = TMM(N_class)
            gmm_model.fit(feature)
            # Seed the loss layer's centers and (inverse) bandwidths.
            net.params['loss'][0].data[0,0,:,:] = gmm_model.cluster_centers_.T
            net.params['loss'][1].data[0,0,:,:] = 1.0/gmm_model.covars_.T
        else:
            # Subsequent rounds: features from the latest snapshot; centers
            # are read back out of the trained loss layer.
            ret, net = extract_feature('net.prototxt', 'init.caffemodel', ['output'], N, True, 0)
            feature = ret[0].squeeze()
            gmm_model.cluster_centers_ = net.params['loss'][0].data[0,0,:,:].T
        Y_pred_last = Y_pred
        Y_pred = gmm_model.predict(feature).squeeze()
        acc, freq = cluster_acc(Y_pred, Y)
        acc_list.append(acc)
        nmi = normalized_mutual_info_score(Y, Y_pred)
        print freq
        print freq.sum(axis=1)
        print 'acc: ', acc, 'nmi: ', nmi
        print (Y_pred != Y_pred_last).sum()*1.0/N
        # Converged when under 0.1% of assignments changed this round.
        if (Y_pred != Y_pred_last).sum() < 0.001*N:
            print acc_list
            return acc, nmi
        time.sleep(1)
        # Rewrite the net at the current seek so training resumes mid-epoch.
        write_net(db, dim, N_class, "'{:08}'".format(seek))
        # Sharpen assignments (square) with a per-cluster frequency bias to
        # discourage degenerate clusters, then row-normalize.
        weight = gmm_model.transform(feature)
        weight = (weight.T/weight.sum(axis=1)).T
        bias = (1.0/weight.sum(axis=0))
        bias = N_class*bias/bias.sum()
        weight = (weight**2)*bias
        weight = (weight.T/weight.sum(axis=1)).T
        print weight[:10,:]
        write_db(weight, np.zeros((weight.shape[0],)), 'train_weight')
        net.save('init.caffemodel')
        del net
        with open('solver.prototxt', 'w') as fsolver:
            fsolver.write("""net: "net.prototxt"
base_lr: 0.01
lr_policy: "step"
gamma: 0.1
stepsize: 100000
display: 10
max_iter: %d
momentum: 0.9
weight_decay: 0.0000
snapshot: 100
snapshot_prefix: "exp/test/save"
snapshot_after_train:true
solver_mode: GPU
debug_info: false
sample_print: false
device_id: 0"""%update_interval)
        # Train via the caffe CLI, then promote the snapshot to the next
        # round's initialization.
        os.system('caffe train --solver=solver.prototxt --weights=init.caffemodel')
        shutil.copyfile('exp/test/save_iter_%d.caffemodel'%update_interval, 'init.caffemodel')
        iters += 1
        seek = (seek + train_batch_size*update_interval)%N
if __name__ == '__main__':
    # Dataset name is the first CLI argument; lam (the solver update
    # interval) is preset for known datasets, else read from argv[2].
    db = sys.argv[1]
    if db == 'mnist':
        lam = 160
    elif db == 'stl':
        lam = 40
    elif db == 'reutersidf' or db == 'reutersidf10k':
        lam = 20
    else:
        lam = int(sys.argv[2])
    # Disabled sweep over lambda values, kept for reference:
    """acc_list = []
nmi_list = []
for i in xrange(0,9):
    lam = 10*(2**i)
    acc, nmi = DisKmeans(db, lam)
    acc_list.append(acc)
    nmi_list.append(nmi)
print acc_list
print nmi_list"""
    DisKmeans(db, lam)
| |
# coding=utf-8
# Sikuli script preamble: make FindFailed raise exceptions instead of
# prompting, so a missing screenshot match aborts the test run.
setThrowException(True)
class Nautilus:
    """Drives two Nautilus tabs via Sikuli screen automation.

    The first tab is connected to a WebDAV share (REMOTE), the second
    browses the local filesystem (LOCAL). ``_dirs`` tracks a directory-name
    stack per tab so ``goUp`` can unwind to a named ancestor. All
    "<digits>.png" arguments are Sikuli screenshot patterns recorded for a
    specific desktop theme.
    """
    REMOTE = 0
    LOCAL = 1
    # Class-level placeholders; both are replaced per instance in __init__.
    _tab = None
    _dirs = {}

    def __init__(self):
        self._dirs = {self.LOCAL: [], self.REMOTE: []}
        self._startNautilus()
        self._initWebdav()
        sleep(1)
        self._initLocal()

    def _startNautilus(self):
        # Launch Nautilus and wait for its window to appear on screen.
        openApp("/usr/bin/nautilus")
        wait(Pattern("1265282313623.png").similar(0.70).firstN(1))

    def _initWebdav(self):
        # Walk the "connect to server" dialog and open the WebDAV share;
        # the remote root becomes the active tab.
        click("1265202229746.png")
        wait("1265202325039.png")
        click("1265202325039.png")
        click("1265202559414.png")
        click("1265278752490.png")
        type("1265278810480.png", "webdav")
        click("1265279597052.png")
        self._tab = self.REMOTE
        self._dirs[self.REMOTE].append("/")

    def _initLocal(self):
        # Ctrl+T opens a second tab, used for the local filesystem, and
        # navigates it into the "down" download directory.
        type("t", KEY_CTRL)
        sleep(1)
        self._tab = self.LOCAL
        click("1265313336023.png")
        self._dirs[self.LOCAL].append("/")
        self.openLocal("1265314310481.png", "down")

    def switchRemote(self):
        # Ctrl+PageUp moves to the first (remote) tab if not already there.
        if (self._tab == self.LOCAL):
            type(Key.PAGE_UP, KEY_CTRL)
            self._tab = self.REMOTE
            sleep(0.4)

    def switchLocal(self):
        # Ctrl+PageDown moves to the second (local) tab if not already there.
        if (self._tab == self.REMOTE):
            type(Key.PAGE_DOWN, KEY_CTRL)
            self._tab = self.LOCAL
            sleep(0.4)

    def openLocal(self, dirImg, dirName):
        self.switchLocal()
        self._open(dirImg, dirName)

    def _open(self, dirImg, dirName):
        # Double-click the directory icon and record the name on the stack.
        doubleClick(dirImg)
        self._dirs[self._tab].append(dirName)

    def openRemote(self, dirImg, dirName):
        self.switchRemote()
        self._open(dirImg, dirName)

    def upRemote(self, dirName=None):
        self.switchRemote()
        sleep(1)
        self.goUp(dirName)

    def upLocal(self, dirName=None):
        self.switchLocal()
        sleep(1)
        self.goUp(dirName)

    def goUp(self, dirName):
        """Click the 'up' toolbar button until dirName is the current dir.

        With dirName=None, go up exactly one level.
        """
        if dirName == None:
            click(Pattern("1266259183958.png").similar(0.90).firstN(1))
            self._dirs[self._tab].pop()
            return
        while self._dirs[self._tab][-1] != dirName:
            click(Pattern("1266259183958.png").similar(0.90).firstN(1))
            self._dirs[self._tab].pop()

    def copy(self):
        type("c", KEY_CTRL)

    def paste(self):
        type("v", KEY_CTRL)

    def rename(self, fileImg, newName, newFileImg):
        """Rename a file via F2, verifying the renamed icon appears.

        Uses Sikuli's global paste() (clipboard) rather than type() so
        non-ASCII names survive keyboard-layout differences.
        """
        click(fileImg)
        sleep(0.2)
        type(Key.F2)
        sleep(0.5)
        type("a", KEY_CTRL)
        sleep(0.2)
        paste(newName)
        sleep(0.1)
        type(Key.ENTER)
        wait(newFileImg)
class NautilusWebdavTest:
    """Scenario test exercising WebDAV operations through the Nautilus UI.

    Each test* method asserts UI state with Sikuli find/wait on recorded
    screenshots. Tests build on state left by earlier ones, so run() must
    execute them in the listed order.
    """
    _nautilus = None

    def __init__(self, nautilus):
        self._nautilus = nautilus

    def run(self):
        # Order matters: later tests rely on files/dirs created earlier.
        self.testInitial()
        self.testListCollection()
        self.testDownloadSingle()
        self.testListSubdir()
        self.testDownloadMultiple()
        self.testCreateNewdir()
        self.testListNewdir()
        self.testUploadSingle()
        self.testCreateNewsubdir()
        self.testListNewsubdir()
        self.testUploadSingleOverwrite()
        self.testDeleteNewdir()
        self.testUploadNew()
        self.testDownloadUploaded()
        self.testRenameFiles()
        self.testCopyFilesRemote()
        self.testRenameCollection()

    def testInitial(self):
        # Remote root listing is visible after connecting.
        self._nautilus.switchRemote()
        find(Pattern("1265793342336.png").similar(0.70).firstN(1))

    def testListCollection(self):
        self._nautilus.openRemote("1265279909203.png", "collection")
        find(Pattern("1265793562509.png").similar(0.70).firstN(1))

    def testDownloadSingle(self):
        # Copy one remote file and paste it into the local tab.
        self._nautilus.switchRemote()
        click(Pattern("1265314613379.png").similar(0.95).firstN(1))
        self._nautilus.copy()
        self._nautilus.switchLocal()
        sleep(1)
        self._nautilus.paste()
        sleep(1)
        find(Pattern("1265315503320.png").similar(0.68).firstN(1))

    def testListSubdir(self):
        self._nautilus.openRemote("1265315881723.png", "subdir")
        find("1265315899109.png")

    def testDownloadMultiple(self):
        # Shift+Down extends the selection before copying.
        self._nautilus.switchRemote()
        click(Pattern("1265795508414.png").similar(0.70).firstN(1))
        type(Key.DOWN, KEY_SHIFT)
        self._nautilus.copy()
        sleep(1)
        self._nautilus.switchLocal()
        sleep(1)
        self._nautilus.paste()
        sleep(1)
        find(Pattern("1265822372031.png").similar(0.90).firstN(1))

    def testCreateNewdir(self):
        # Ctrl+Shift+N creates a folder; name it "newdir".
        self._nautilus.switchRemote()
        type("n", KEY_CTRL | KEY_SHIFT)
        sleep(1)
        type("newdir")
        type("\n")
        find(Pattern("1266256110323.png").similar(0.90).firstN(1))

    def testListNewdir(self):
        self._nautilus.openRemote("1266256707500.png", "newdir")
        find(Pattern("1266256773322.png").similar(0.90).firstN(1))

    def testUploadSingle(self):
        self._nautilus.switchLocal()
        click(Pattern("1266256870724.png").similar(0.90).firstN(1))
        self._nautilus.copy()
        self._nautilus.switchRemote()
        self._nautilus.paste()
        find(Pattern("1266256969255.png").similar(0.90).firstN(1))

    def testCreateNewsubdir(self):
        self._nautilus.switchRemote()
        type("n", KEY_CTRL | KEY_SHIFT)
        sleep(1)
        type("newsubdir")
        type("\n")
        find(Pattern("1266257989662.png").similar(0.90).firstN(1))

    def testListNewsubdir(self):
        self._nautilus.openRemote("1266256707500.png", "newdir")
        find(Pattern("1266258293601.png").similar(0.90).firstN(1))

    def testUploadSingleOverwrite(self):
        # Upload once, then upload again and confirm the overwrite dialog.
        self._nautilus.switchLocal()
        click(Pattern("1266257097775.png").similar(0.90).firstN(1))
        self._nautilus.copy()
        self._nautilus.switchRemote()
        self._nautilus.paste()
        find(Pattern("1266258371198.png").similar(0.78).firstN(1))
        self._nautilus.switchLocal()
        click(Pattern("1266257097775.png").similar(0.90).firstN(1))
        self._nautilus.copy()
        self._nautilus.switchRemote()
        self._nautilus.paste()
        wait(Pattern("1266258781306.png").similar(0.90).firstN(1))
        dialog = find(Pattern("1266258781306.png").similar(0.90).firstN(1))
        # Confirm replacement via the button inside the dialog region.
        click(dialog.inside().find(Pattern("1266257459272.png").similar(0.90).firstN(1)))
        find(Pattern("1266258834123.png").similar(0.90).firstN(1))

    def testDeleteNewdir(self):
        # Delete "newdir" and confirm in the deletion dialog.
        self._nautilus.upRemote("collection")
        click(Pattern("1266259247619.png").similar(0.90).firstN(1))
        type(Key.DELETE)
        wait(Pattern("1266259486059.png").similar(0.55).firstN(1))
        dialog = find(Pattern("1266259486059.png").similar(0.55).firstN(1))
        click(dialog.inside().find(Pattern("1266259533961.png").similar(0.90).firstN(1)))
        sleep(1)
        find(Pattern("1266259597530.png").similar(0.90).firstN(1))

    def testUploadNew(self):
        # Select everything in the local "up" dir and upload it.
        self._nautilus.upLocal("/")
        self._nautilus.openLocal(Pattern("1266259890975.png").similar(0.90).firstN(1), "up")
        type("a", KEY_CTRL)
        sleep(0.5)
        self._nautilus.copy()
        self._nautilus.switchRemote()
        sleep(1)
        self._nautilus.paste()
        find(Pattern("1266263725826.png").similar(0.90).firstN(1))
        find(Pattern("1266263798494.png").similar(0.90).firstN(1))
        find(Pattern("1266264073912.png").similar(0.90).firstN(1))

    def testDownloadUploaded(self):
        self._nautilus.switchRemote()
        # downloading dirs is broken in Nautilus
        click(Pattern("1266269427884.png").similar(0.90).firstN(1))
        type(Key.DOWN, KEY_SHIFT)
        type(Key.DOWN, KEY_SHIFT)
        type(Key.DOWN, KEY_SHIFT)
        self._nautilus.copy()
        self._nautilus.upLocal("/")
        self._nautilus.openLocal("1265314310481.png", "down")
        self._nautilus.paste()

    def testRenameFiles(self):
        # Covers plain, UTF-8 umlaut, and non-UTF-8 name round trips.
        self._nautilus.switchRemote()
        self._nautilus.rename("1266270237742.png", u"put_test_renamed.xml", Pattern("1266270332525.png").similar(0.90).firstN(1))
        self._nautilus.rename("1266270356862.png", u"put_test_utf8_\u00f6\u00e4\u00fc\u00df.txt", Pattern("1266274332854.png").similar(0.80).firstN(1))
        self._nautilus.rename(Pattern("1266270558156.png").similar(0.90).firstN(1), u"put_non_utf8_test.txt", Pattern("1266270602424.png").similar(0.90).firstN(1))

    def testCopyFilesRemote(self):
        self._nautilus.switchRemote()
        click(Pattern("1266274684143.png").similar(0.80).firstN(1))
        # invert selection
        type("i", KEY_CTRL | KEY_SHIFT)
        sleep(1)
        self._nautilus.copy()
        self._nautilus.openRemote(Pattern("1266274684143.png").similar(0.80).firstN(1), "collection")
        sleep(1)
        self._nautilus.paste()
        sleep(1)
        wait(Pattern("1266311546228.png").similar(0.60).firstN(1))
        find(Pattern("1266311574320.png").similar(0.60).firstN(1))
        find(Pattern("1266311712385.png").similar(0.60).firstN(1))

    def testRenameCollection(self):
        self._nautilus.upRemote()
        self._nautilus.rename(Pattern("1266310197088.png").similar(0.90).firstN(1), "renamed_collection", Pattern("1266310220931.png").similar(0.90).firstN(1))
# Script entry point: start Nautilus, connect to WebDAV, then run the suite.
nautilus = Nautilus()
test = NautilusWebdavTest(nautilus)
test.run()
| |
from parsr import *
import unittest
class myTestCase(unittest.TestCase):
    """TestCase base whose subclasses list their test method names in ``tests``."""

    @classmethod
    def suite(cls):
        """Build a TestSuite with one instance per name in ``cls.tests``."""
        cases = [cls(test_name) for test_name in cls.tests]
        result = unittest.TestSuite()
        result.addTests(cases)
        return result
class myTestSuite(unittest.TestSuite):
    """TestSuite exposing the same ``suite()`` hook as myTestCase subclasses."""

    @classmethod
    def suite(cls):
        """Return a fresh instance of this suite class."""
        return cls()
class tokenTests(myTestCase):
    """Tests for single-token grammars built from regex tokens."""

    def setUp(self):
        self.tokA = token("(?P<a>a)")
        self.tokB = token("(?P<b>b)")
        self.grA = grammar.fromSymbol(self.tokA)
        self.grB = grammar.fromSymbol(self.tokB)
    # Test method names run by suite(); placeOfError is disabled.
    tests = ["parse", "merger", "oneChar", "results"]#, "placeOfError"]

    def parse(self):
        # Named regex groups surface as {group_name: matched_text} dicts.
        self.assertEqual(self.grA.parse("a"), {"a" : "a"})
        self.assertEqual(self.grB.parse("b"), {"b" : "b"})
        self.assertRaises(SyntaxError, self.grA.parse, "b")
        self.assertRaises(SyntaxError, self.grB.parse, "a")

    def merger(self):
        # A token merger replaces the default result wholesale.
        tokC = token("c", merger = lambda x: "CCC")
        grC = grammar.fromSymbol(tokC)
        self.assertEqual(grC.parse("c"), "CCC")

    def oneChar(self):
        # A token that can match the empty string is rejected at parse time.
        tokEmpty = token("\s*")
        grEmpty = grammar.fromSymbol(tokEmpty)
        self.assertRaises(ValueError, grEmpty.parse, "a")

    def results(self):
        # Without named groups, a token yields the matched string itself.
        tokString = token("a")
        grString = grammar.fromSymbol(tokString)
        self.assertEqual(grString.parse("a"), "a")
class chainTests(tokenTests):
    """Tests for sequencing symbols via >> and chain()."""

    def setUp(self):
        super(chainTests, self).setUp()
        self.tokAA = self.tokA >> self.tokA
        self.tokAB = self.tokA >> self.tokB
        self.grAA = grammar.fromSymbol(self.tokAA)
        self.grAB = grammar.fromSymbol(self.tokAB)
    tests = ["parse", "merger", "context", "doubleChain"]

    def parse(self):
        # A chain yields the list of its members' results, in order.
        res = self.grAA.parse("aa")
        self.assertTrue(isinstance(res, list))
        self.assertEqual(len(res), 2)
        self.assertEqual(res, [{"a" : "a"}, {"a" : "a"}])
        res = self.grAB.parse("ab")
        self.assertEqual(res, [{"a" : "a"}, {"b" : "b"}])
        self.assertRaises(SyntaxError, self.grAA.parse, "ab")
        self.assertRaises(SyntaxError, self.grAA.parse, "a")
        self.assertRaises(SyntaxError, self.grAB.parse, "aa")

    def merger(self):
        # The chain merger receives the member results as a list.
        def merger(res):
            return ("%s" % res[1]["b"], "%s" % res[0]["a"])
        self.mergedAB = grammar.fromSymbol(chain([self.tokA, self.tokB], merger = merger))
        self.assertEqual(self.mergedAB.parse("ab"), ("b", "a"))

    def context(self):
        # Extra parse() context keys are passed to mergers as kwargs.
        self.mergedAB = grammar.fromSymbol(chain([self.tokA, self.tokB], merger = lambda x, foo: foo))
        self.assertEqual(self.mergedAB.parse("ab", { "foo" : "bar"}), "bar")

    def doubleChain(self):
        # Chains of chains nest their result lists.
        grAAAB = grammar.fromSymbol(chain([self.tokAA, self.tokAB]))
        self.assertEqual(grAAAB.parse("aaab"), [[{"a":"a"}, {"a":"a"}], [{"a":"a"}, {"b":"b"}]])
class repeatTests(tokenTests):
    """Tests for repeat() with open and bounded repetition counts."""

    def setUp(self):
        super(repeatTests, self).setUp()
        self.repeatA = grammar.fromSymbol(repeat(self.tokA))
        self.someB = grammar.fromSymbol(repeat(self.tokB, From = 3, To = 4))
        self.minOneA = grammar.fromSymbol(repeat(self.tokA, From = 1))
    tests = ["parse", "FromTo", "merger", "mergeOnce", "withOptional", "nested"]

    def parse(self):
        # repeat() yields a list with one result per repetition.
        res = self.repeatA.parse("aaaaa")
        self.assertTrue(isinstance(res, list))
        self.assertEqual(len(res), 5)
        self.assertEqual(res, [{"a" : "a"}, {"a" : "a"}, {"a" : "a"}, {"a" : "a"}, {"a" : "a"}])
        res = self.someB.parse("bbb")
        self.assertTrue(isinstance(res, list))
        self.assertEqual(len(res), 3)
        self.assertEqual(res, [{"b" : "b"}, {"b" : "b"}, {"b" : "b"}])
        self.assertEqual(self.minOneA.parse("a"), [{"a": "a"}])

    def FromTo(self):
        # Counts outside [From, To] are syntax errors.
        self.assertRaises(SyntaxError, self.someB.parse, "aa")
        self.assertRaises(SyntaxError, self.someB.parse, "bb")
        self.assertRaises(SyntaxError, self.someB.parse, "bbbbb")
        self.assertRaises(SyntaxError, self.minOneA.parse, "")

    def merger(self):
        self.repeatA.startSymbol.merger = lambda x: "REPEAT"
        self.someB.startSymbol.merger = lambda x: "SOME"
        self.assertEqual(self.repeatA.parse("aaaaa"), "REPEAT")
        self.assertEqual(self.someB.parse("bbb"), "SOME")

    def mergeOnce(self):
        # The repeat merger must fire exactly once, not per repetition.
        self.count = 0
        def countMerges(res):
            self.count += 1
            return res
        self.repeatA.startSymbol.merger = countMerges
        self.repeatA.parse("aaaaaa")
        self.assertEqual(self.count, 1)

    def withOptional(self):
        gr = grammar.fromSymbol(optional(token("b")) >> repeat(token("a"), From = 1))
        self.assertEqual(gr.parse("a"), [[], ["a"]])
        self.assertEqual(gr.parse("aaa"), [[], ["a", "a", "a"]])
        self.assertEqual(gr.parse("baaa"), [["b"], ["a", "a", "a"]])

    def nested(self):
        # Repeats of chains of repeats must backtrack correctly.
        gr = grammar.fromSymbol(repeat( chain([repeat(token("a"), From = 1), repeat(token("b"), From = 1)]) ))
        gr.parse("aaaab")
        gr.parse("aaabbbbbbababbabbab")
        gr.parse("abbbab")
class oneOfTests(tokenTests):
    """Tests for alternation via oneOf()."""

    def setUp(self):
        super(oneOfTests, self).setUp()
        self.oneOf = grammar.fromSymbol(oneOf([self.tokA, self.tokB]))
    tests = ["parse", "merge"]

    def parse(self):
        # The matching alternative's result is returned unwrapped.
        res = self.oneOf.parse("a")
        self.assertEqual(res, {"a" : "a"})
        res = self.oneOf.parse("b")
        self.assertEqual(res, {"b" : "b"})
        self.assertRaises(SyntaxError, self.oneOf.parse, "c")

    def merge(self):
        # The merger receives the single alternative result, not a list.
        def merger(l):
            self.assertTrue(not isinstance(l, list))
            return "foo"
        self.oneOf.startSymbol.merger = merger
        self.assertEqual(self.oneOf.parse("a"), "foo")
class optionalTests(tokenTests):
    """Tests for optional() in leading, trailing, and mixed positions."""

    def setUp(self):
        super(optionalTests, self).setUp()
        self.optional = grammar.fromSymbol(optional(self.tokA) >> self.tokB)
    tests = ["parse", "atEnd", "withOneOfAtEnd"]

    def parse(self):
        # An absent optional contributes an empty list to the chain result.
        res = self.optional.parse("b")
        self.assertEqual(res, [[], {"b": "b"}])
        res = self.optional.parse("ab")
        self.assertEqual(res, [[{"a" : "a"}], {"b" : "b"}])
        self.assertRaises(SyntaxError, self.optional.parse, "cb")

    def atEnd(self):
        gr = grammar.fromSymbol(self.tokA >> optional(self.tokB))
        self.assertEqual(gr.parse("a"), [{"a":"a"}, []])
        self.assertEqual(gr.parse("ab"), [{"a": "a"}, [{"b":"b"}]])

    def withOneOfAtEnd(self):
        gr = grammar.fromSymbol(oneOf([self.tokA, self.tokB]) >> optional(self.tokB))
        self.assertEqual(gr.parse("a"), [{"a":"a"}, []])
        self.assertEqual(gr.parse("ab"), [{"a": "a"}, [{"b":"b"}]])
        self.assertEqual(gr.parse("bb"), [{"b": "b"}, [{"b":"b"}]])
class omitTests(tokenTests):
    """Tests that a lexer state's omit token drops whitespace silently."""

    def setUp(self):
        super(omitTests, self).setUp()
        # Whitespace is the omitted token in this single lexer state.
        ls = lexerState([self.tokA], token("\s+"))
        self.withOmitted = grammar.fromSymbol(repeat(self.tokA), lexerStates = [ls])
    tests = ["parse"]

    def parse(self):
        # Spaces between 'a's disappear from the parse result entirely.
        res = self.withOmitted.parse("aa a a")
        self.assertEqual(len(res), 4)
        self.assertEqual(res, [{"a": "a"}, {"a" : "a"}, {"a" : "a"}, {"a" : "a"}])
class stateTests(myTestCase):
    """Tests for push/pop lexer state transitions."""
    tests = ["parse"]

    def parse(self):
        t1 = token("a")
        t2 = token("b")
        t3 = token("c")
        t4 = token("/b")#, popState = True)
        # 'b' pushes into bState (which omits spaces); '/b' pops back out.
        aState = lexerState([t1, t2, t3, t4])
        bState = lexerState([t1, t2, t3, t4], omit = token("[ ]+"), pushOn = t2, popOn = t4)
        t = grammar.fromSymbol(repeat(oneOf([t1, t2, t3, t4]), From = 1), lexerStates = [aState, bState])
        self.assertEqual(t.parse("a"), ["a"])
        self.assertEqual(t.parse("b"), ["b"])
        self.assertEqual(t.parse("c"), ["c"])
        self.assertEqual(t.parse("/b"), ["/b"])
        self.assertEqual(t.parse("abc/b"), ["a", "b", "c", "/b"])
        # Spaces are only skipped between the push and the pop.
        self.assertEqual(t.parse("ab c /ba"), ["a", "b", "c", "/b", "a"])
        self.assertRaises(SyntaxError, t.parse, "a b c /ba")
        self.assertRaises(SyntaxError, t.parse, "ab c /b a")
class grammarTests(myTestCase):
    """End-to-end tests of the declarative grammar-class DSL.

    ``lang`` defines a tiny arithmetic language (one-operator expressions
    with optional /* */ comments) using class-level tokens, lexer states,
    and @token/@symbol decorated merger functions.
    """

    class lang(grammar):
        whiteSpace = token("[ ]+")
        commentEnd = token("[*]/") #, popState
        # Any character sequence that is not a '*/' terminator.
        commentBody = token("([^*/]|([*](?![/]))|((?<![*])[/]))")
        commentStart = token( "/[*]")
        # Inside a comment only the terminator matters; bodies are skipped.
        commentState = lexState( [
                "commentEnd"
            ], [
                "commentBody"
            ],
            pushOn = "commentStart",
            popOn = "commentEnd")
        comment = symbol("commentStart commentEnd")
        oneNumber = token("\d")
        lexerStartState = lexState( [
                "commentStart",
                "oneNumber",
                "minus",
                "plus",
                "mulOperator",
            ], [
                "whiteSpace"
            ] )

        @token("-")
        def minus(res):
            return "SUB"

        @symbol("?minus {1,}*oneNumber")
        def number(res):
            # Optional leading minus followed by one or more digits.
            res = flatten(res)
            if res[0] != "SUB":
                numbers = "".join(res[0])
                return int(numbers)
            numbers = "".join(res[1])
            return -1*int(numbers)

        @token("([*](?![/]))|([/](?![*]))")
        def mulOperator(res):
            # '*' / '/' but never the comment delimiters '/*' or '*/'.
            if res == "*":
                return "MUL"
            return "DIV"

        plus = token("[+]")

        @symbol("plus | minus")
        def addOperator(res):
            if res[0] != "SUB":
                return "ADD"
            return "SUB"

        @symbol("number mulOperator number")
        def mulOperation(res):
            if res[1] == "MUL":
                return res[0] * res[2]
            return res[0] / res[2]

        @symbol("number addOperator number")
        def addOperation(res):
            if res[1] == "ADD":
                return res[0] + res[2]
            return res[0] - res[2]

        @symbol("mulOperation | addOperation ?comment")
        def expr(res):
            return res[0]

        @symbol("expr")
        def startSymbol(res):
            return res[0]

    def setUp(self):
        pass
    tests = ["createParser", "testParser", "testComment"]

    def createParser(self):
        # Instantiating the grammar class compiles the DSL into a parser.
        self.parser = self.lang()

    def testParser(self):
        self.createParser()
        self.assertEqual(self.parser.parse("1 + 2"), 3)
        self.assertEqual(self.parser.parse("1+2"), 3)
        self.assertEqual(self.parser.parse(" 1 +2 "), 3)
        self.assertEqual(self.parser.parse("1 - 2"), -1)
        self.assertEqual(self.parser.parse("1-2"), -1)
        self.assertEqual(self.parser.parse(" 1 -2 "), -1)
        self.assertEqual(self.parser.parse("1 * 2"), 2)
        self.assertEqual(self.parser.parse("1*-2"), -2)
        self.assertEqual(self.parser.parse("4 / -2"), -2)
        self.assertEqual(self.parser.parse("4/2"), 2)

    def testComment(self):
        self.createParser()
        self.assertEqual(self.parser.parse("1 + 2 /* foobar */"), 3)
class generalTests(myTestCase):
    """Cross-cutting parser behaviors."""
    tests = ["infiniteExpansion"]

    def infiniteExpansion(self):
        # Mutually recursive repeats can never consume input: parsing must
        # abort with InfiniteStateExpansion rather than loop forever.
        a = repeat(definedLater("b"))
        b = repeat(a)
        a.define("b", b)
        gr = grammar.fromSymbol(a)
        self.assertRaises(InfiniteStateExpansion, gr.parse, "")
class parsrTests(myTestSuite):
    """Aggregate suite collecting every parsr test case class, in order."""

    _CASE_CLASSES = (tokenTests, chainTests, repeatTests, optionalTests,
                     oneOfTests, omitTests, stateTests, grammarTests,
                     generalTests)

    def __init__(self, *args, **kwargs):
        super(parsrTests, self).__init__(*args, **kwargs)
        for case_cls in self._CASE_CLASSES:
            self.addTests(case_cls.suite())
if __name__ == "__main__":
    # Run the full parsr suite with verbose per-test output.
    unittest.TextTestRunner(verbosity = 2).run(parsrTests.suite())
| |
import time
import os
from subprocess import Popen, PIPE
import re
import timenode
import subprocess
class TimeLog:
def __init__(self, args, logfilename=None):
    # args: parsed CLI namespace; must provide .dir (log directory) and
    # .systray (a callable used to publish the status string, or falsy).
    self.dir = args.dir
    self.systray = args.systray
    # Default log file is one per day, e.g. "2024-01-31.log".
    self.logfilename = logfilename or time.strftime("%Y-%m-%d.log")
    self.curfilename = "active"
    self.load_tasks()
#33 #Taken from: http://stackoverflow.com/questions/3983946/get-active-window-title-in-x
def get_active_window_x11(self):
    """Return (pid, window title) for the focused X11 window via xdotool.

    Falls back to (0, "Unknown") when xdotool produces no output.

    Fixes: the window id is stripped before being reused as a command-line
    argument (xdotool emits a trailing newline), and an empty id — which the
    old `!= None` check let through — no longer triggers the follow-up calls.
    """
    (id_w, err) = Popen(['xdotool', 'getwindowfocus'], stdout=PIPE).communicate()
    pid = 0
    name = "Unknown"
    if id_w:
        id_w = id_w.strip()  # drop the trailing newline before passing it on
        (res, err) = Popen(['xdotool', 'getwindowname', id_w], stdout=PIPE).communicate()
        if res:
            name = res.rstrip()
        (res, err) = Popen(['xdotool', 'getwindowpid', id_w], stdout=PIPE).communicate()
        if res:
            pid = res.rstrip()
    return (pid, name)
def get_active_window_win(self):
    """Return (pid, window caption) for the foreground window (Windows only).

    Imports are local so the module still loads on non-Windows hosts.
    """
    from win32gui import GetWindowText, GetForegroundWindow
    from win32process import GetWindowThreadProcessId
    hwnd = GetForegroundWindow()
    tid, pid = GetWindowThreadProcessId(hwnd)
    capt = GetWindowText(hwnd)
    return (pid, capt)
def get_active_window(self):
    """Dispatch to the platform-specific active-window probe."""
    probe = self.get_active_window_win if os.name == 'nt' else self.get_active_window_x11
    return probe()
def get_process_command_from_pid_win32(self, processid):
    """Resolve a Windows pid to its image name via `tasklist` CSV output;
    returns "<unknown>" when the pid cannot be resolved."""
    cmd = 'tasklist /fi "pid eq '+str(processid)+'" /fo csv'
    # print cmd
    # execute the command
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE,stderr=subprocess.STDOUT)
    lines = proc.stdout.readlines()
    # First CSV line is the header; the second holds the matching process row.
    if len(lines)>1:
        line = lines[1]
        print line
        # NOTE(review): naive split(",") breaks if the quoted image name itself
        # contains a comma — consider the csv module.
        items=line.split(",")
        if len(items)>=2:
            pname = items[0][1:-1]  # strip the surrounding CSV quotes
            # print "PNAME="+pname
            return pname
    #cmdLine = str(line)
    #file = cmdLine.strip().split(" ")[-1];
    #split = file.split("\\"); #"
    #command=split[1]
    return "<unknown>"
def get_process_command_from_id(self, processid):
    """Resolve a pid to its executable: "" for pid 0, "<unknown>" on failure."""
    if processid == 0:
        return ""
    command="<unknown>"
    if os.name=='nt':
        command = self.get_process_command_from_pid_win32(processid)
    else:
        try:
            # /proc/<pid>/exe is a symlink to the running binary (Linux /proc).
            command = os.readlink("/proc/%s/exe" % processid)
        except OSError as e:
            print e
    print "command:"+command
    return command
@staticmethod
def escape(str):
#Surround the string in double quotes and escape any pre-existing double quotes
return '"%s"' % str.replace("\"", "\\\"")
@staticmethod
def unescape(str):
#Remove surrounding double quotes and replace escaped double quotes
m = re.match("^\"(.*)\"$", a)
if m != None:
str = m.group(1)
return str.replace("\"", "\\\"")
@staticmethod
def get_current_time():
return "%d" % (time.time() * 1000)
def logtime(self, command=None, window=None):
    """Append one activity sample to the day's log file and refresh the task
    snapshot.

    :param command: executable name; defaults to the last sampled command.
    :param window: window title; defaults to the last sampled title.
    """
    command = command or self.currentcommand
    window = window or self.currentwindow
    # Best-effort: make sure the log directory exists.
    try:os.mkdir(self.dir)
    except:pass
    f = open(os.path.join(self.dir,self.logfilename), 'a+')
    moreinfo = ""
    # For Chrome/Chromium windows, ask chromix for tab info and map the
    # (suffix-stripped) window title back to a URL.
    if re.search("chrom",command):
        (moreinfo,err) = Popen(['chromix', 'list'], stdout=PIPE).communicate()
        moreinfo = moreinfo
        win2url = {}
        # Each chromix line is split into 3 fields; field 3 (title) maps to
        # field 2 — presumably "<id> <url> <title>"; confirm against chromix.
        for moreline in moreinfo.split('\n'):
            moreitems = moreline.split(' ',2)
            if len(moreitems)==3:
                win2url[moreitems[2]]=moreitems[1]
        window = window.replace(' - Google Chrome','').replace(' - Chromium','')
        moreinfo = win2url.get(window) or ""
    #print u'W=[{}]=[{}] W2U={}'.format(window,moreinfo,win2url)
    # Normalize the title to UTF-8 (titles arrive as cp1251 on Windows).
    window = unicode(window,'cp1251' if os.name=='nt' else 'utf-8').encode('utf-8')
    tsk = self.query.find_task([TimeLog.get_current_time(),command,window,moreinfo],int(time.time() * 1000),10000000)
    self.query.process([TimeLog.get_current_time(),command,window,moreinfo],int(time.time() * 1000),10000000)
    self.logtask(tsk)
    # Log line: "<ms-timestamp> "<command>" "<title>" "<url>"" (newlines in the
    # title are flattened to '|').
    logstring = "{} {} {} {}\n".format(TimeLog.get_current_time(), TimeLog.escape(command), TimeLog.escape(window.replace("\n","|")), TimeLog.escape(moreinfo.strip()))
    if f:
        f.write(logstring)
        f.flush()
        f.close()
    print ("{}->{}".format(logstring,tsk.tag if tsk else "n/a"))
@staticmethod
def fmt_delta_time(time):
time=int(time/1000)
days, remainder = divmod(time,3600*24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
#s1 = ' ' if days<1 else "%3dd "%days
s1=""
s1 = s1+'%02d:%02d:%02d' % (hours, minutes,seconds)
return s1
def load_tasks(self):
    """(Re)build the task-classification tree from <dir>/tasks.cat."""
    self.query = timenode.TimeQuery(False,False)
    self.query.tasks = timenode.loadTasks(os.path.join(self.dir,"tasks.cat"))
    # Catch-all buckets for unmatched and away/idle activity.
    self.query.tasks.children.add(timenode.TimeNode("*OTHER*",expr=["*OTHER*"]))
    # NOTE(review): the *AWAY* node also matches expr ["*OTHER*"] — looks like
    # a copy/paste slip; confirm whether expr=["*AWAY*"] was intended.
    self.query.tasks.children.add(timenode.TimeNode("*AWAY*",expr=["*OTHER*"]))
@staticmethod
def fmt_delta_time_mins(time):
    """Format a millisecond duration as MM:SS.

    NOTE(review): hours and days are computed but never rendered, and the
    output is minutes:seconds despite the "_mins" name — confirm whether
    '%02d:%02d' % (hours, minutes) was intended instead.
    """
    time=int(time/1000)
    days, remainder = divmod(time,3600*24)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    s1 = '%02d:%02d' % (minutes, seconds)
    return s1
def logtask(self,tsk):
    """Write a one-line snapshot ("[current]:NN%tag:...") of the top tasks to
    <dir>/active and push it to the systray callback, if configured."""
    fname=os.path.join(self.dir, self.curfilename)
    # Deleting the snapshot file acts as a request to reload tasks.cat.
    if not os.path.exists(fname):
        self.load_tasks()
    print "logtask ",tsk
    self.query.sort_tasks()
    s="["+(tsk.tag if tsk else "n/a")+"]:"
    n=0
    # Append the top three non-catch-all tasks (by accumulated time) as
    # percentages of the overall total.
    for t in sorted(self.query.tasks.children,key=lambda t:t.tqueue.sum(),reverse=True):
        if t.time>0 and t.tag!="*OTHER*":
            pct = 100*t.time/self.query.tasks.tqueue.sum()
            s=s+"%2.0f%%%s:" %(pct,t.tag)
            n+=1
            if n>=3:
                break
    if len(s)>0:
        #print "SNAPSHOT: "+s
        f = open(fname,"wt")
        f.write(s)
        f.close();
        if self.systray:
            self.systray(s)
def get_idle_time(self):
    """Milliseconds since the last user input (platform-specific).

    Fixes: the POSIX branch imported ``get_ilde_time`` but then called the
    undefined name ``get_idle_time`` (NameError on every call); the Windows
    branch returned the raw GetLastInputInfo() tick stamp — the time *of* the
    last input — rather than the elapsed idle time.
    """
    if os.name == 'nt':
        from win32api import GetLastInputInfo, GetTickCount
        # Idle time = current tick count minus the tick recorded at last input.
        return GetTickCount() - GetLastInputInfo()
    else:
        # Call the name the module actually exports (misspelled upstream —
        # confirm against the installed actmon API).
        from actmon import get_ilde_time
        return get_ilde_time()
def monitor_active_window(self):
max_idle_timeout = 15
self.cur_idle = False
lastwindow = None
i = 0
try:
while True:
self.curpid, self.currentwindow = self.get_active_window()
print "CW {}".format(self.currentwindow)
if self.currentwindow != lastwindow:
self.currentcommand = self.get_process_command_from_id(self.curpid)
self.logtime()
lastwindow = self.currentwindow
idle = False
try:
idle = self.get_idle_time() > max_idle_timeout*1000
except:
pass
if idle!=self.cur_idle:
self.cur_idle=idle
if idle:
self.logtime("<idle>", "idle for %d"%max_idle_timeout )
else:
self.logtime()
time.sleep(2)
except KeyboardInterrupt:
TimeLog.logtime('#SIGTERM', 'TimeLog terminated')
print ""
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import xml.dom.minidom as minidom
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IdeaIntegrationTest(PantsRunIntegrationTest):
def _idea_test(self, specs, project_dir=os.path.join('.pants.d', 'idea', 'idea', 'IdeaGen'),
               project_name=None, check_func=None, config=None):
    """Helper method that tests idea generation on the input spec list.

    :param specs: target specs passed to the `idea` goal
    :param project_dir: directory passed to --idea-project-dir
    :param project_name: name passed to --idea-project-name
    :param check_func: method to call back with the directory where project files are written.
    :param dict config: pants.ini configuration parameters
    """
    project_dir = os.path.join(get_buildroot(), project_dir)
    if not os.path.exists(project_dir):
        os.makedirs(project_dir)
    # Generate into a throwaway subdir so concurrent runs don't interfere.
    with temporary_dir(root_dir=project_dir) as project_dir_path:
        extra_flags = ['--idea-project-dir={dir}'.format(dir=project_dir_path)]
        if project_name is None:
            project_name = "project"  # to match Pants' built-in default w/o --idea-project-name
        else:
            extra_flags += ['--idea-project-name={name}'.format(name=project_name)]
        all_flags = ['idea', '--no-open'] + specs + extra_flags
        pants_run = self.run_pants(all_flags, config=config)
        self.assert_success(pants_run)
        # Both the module (.iml) and project (.ipr) files must be produced.
        expected_files = ('{project_name}.iml'.format(project_name=project_name),
                          '{project_name}.ipr'.format(project_name=project_name))
        workdir = os.path.join(project_dir_path, project_name)
        self.assertTrue(os.path.exists(workdir),
                        'exec ./pants {all_flags}. Failed to find project_dir at {dir}.'
                        .format(all_flags=" ".join(all_flags), dir=workdir))
        self.assertTrue(all(os.path.exists(os.path.join(workdir, name))
                            for name in expected_files),
                        msg="Failed to exec ./pants {all_flags}".format(all_flags=all_flags))
        if check_func:
            check_func(workdir)
def _get_new_module_root_manager(self, dom):
    """Return the first <content> element of the module when a
    NewModuleRootManager component is present; None otherwise.

    NOTE(review): on a match this returns the first <content> under the whole
    <module>, not under the matched <component> — equivalent only while just
    that component carries <content>; confirm before relying on it.
    """
    module = dom.getElementsByTagName('module')[0]
    components = module.getElementsByTagName('component')
    for component in components:
        if component.getAttribute('name') == 'NewModuleRootManager':
            return module.getElementsByTagName('content')[0]
    return None
def _get_sourceFolders(self, dom):
    """Navigate the dom to return the list of all <sourceFolder> entries in the project file"""
    content = self._get_new_module_root_manager(dom)
    return content.getElementsByTagName('sourceFolder')
def _get_excludeFolders(self, dom):
    """Navigate the dom to return the list of all <excludeFolder> entries in the project file"""
    content = self._get_new_module_root_manager(dom)
    return content.getElementsByTagName('excludeFolder')
# Testing IDEA integration on lots of different targets which require different functionalities to
# make sure that everything that needs to happen for idea gen does happen.
# TODO(Garrett Malmquist): Actually validate the contents of the project files, rather than just
# checking if they exist.
def test_idea_on_alternate_project_dir(self):
alt_dir = os.path.join('.pants.d', 'tmp', 'some', 'random', 'directory', 'for', 'idea', 'stuff')
self._idea_test(['examples/src/java/com/pants/examples/hello::'], project_dir=alt_dir)
def test_idea_alternate_name(self):
alt_name = "alt-name"
self._idea_test(['examples/src/java/com/pants/examples/hello::'], project_name=alt_name)
def test_idea_on_protobuf(self):
self._idea_test(['examples/src/java/com/pants/examples/protobuf::'])
def test_idea_on_jaxb(self): # Make sure it works without ::, pulling deps as necessary.
self._idea_test(['examples/src/java/com/pants/examples/jaxb/main'])
def test_idea_on_unicode(self):
self._idea_test(['testprojects/src/java/com/pants/testproject/unicode::'])
def test_idea_on_hello(self):
def do_check(path):
"""Check to see that the project contains the expected source folders."""
found_source_content = False
iml_file = os.path.join(path, 'project.iml')
self.assertTrue(os.path.exists(iml_file))
dom = minidom.parse(iml_file)
expected_paths = ["file://" + os.path.join(get_buildroot(), path) for path in [
'examples/src/java/com/pants/example/hello',
'examples/src/java/com/pants/examples/hello/greet',
'examples/src/java/com/pants/examples/hello/main',
'examples/src/resources/com/pants/example/hello',
]]
remaining = set(expected_paths)
for sourceFolder in self._get_sourceFolders(dom):
found_source_content = True
self.assertEquals("False", sourceFolder.getAttribute('isTestSource'))
url = sourceFolder.getAttribute('url')
self.assertIn(url, remaining,
msg="Couldn't find url={url} in {expected}".format(url=url,
expected=expected_paths))
remaining.remove(url)
self.assertTrue(found_source_content)
self._idea_test(['examples/src/java/com/pants/examples/hello::'], check_func=do_check)
def test_idea_on_annotations(self):
self._idea_test(['examples/src/java/com/pants/examples/annotation::'])
def test_idea_on_all_examples(self):
self._idea_test(['examples/src/java/com/pants/examples::'])
def _check_javadoc_and_sources(self, path, library_name):
"""
:param path: path to the idea project directory
:param library_name: name of the library to check for (e.g. guava)
"""
def _get_module_library_orderEntry(dom):
module = dom.getElementsByTagName('module')[0]
components = module.getElementsByTagName('component')
for component in components:
if component.getAttribute('name') == 'NewModuleRootManager':
for orderEntry in component.getElementsByTagName('orderEntry'):
if orderEntry.getAttribute('type') == 'module-library':
for library in orderEntry.getElementsByTagName('library'):
if library.getAttribute('name') == 'external':
return library
return None
iml_file = os.path.join(path, 'project.iml')
self.assertTrue(os.path.exists(iml_file))
dom = minidom.parse(iml_file)
libraryElement = _get_module_library_orderEntry(dom)
sources = libraryElement.getElementsByTagName('SOURCES')[0]
sources_found = False
roots = sources.getElementsByTagName('root')
for root in roots:
url = root.getAttribute('url')
if re.match(r'.*\bexternal-libsources\b.*{library_name}\b.*-sources\.jar\b.*$'
.format(library_name=library_name), url):
sources_found = True
break
self.assertTrue(sources_found)
javadoc = libraryElement.getElementsByTagName('JAVADOC')[0]
javadoc_found = False
for root in javadoc.getElementsByTagName('root'):
url = root.getAttribute('url')
if re.match(r'.*\bexternal-libjavadoc\b.*{library_name}\b.*-javadoc\.jar\b.*$'
.format(library_name=library_name), url):
javadoc_found = True
break
self.assertTrue(javadoc_found)
# NOTE(Garrett Malmquist): The test below assumes that the annotation example's dependency on
# guava will never be removed. If it ever is, these tests will need to be changed to check for a
# different 3rdparty jar library.
# Testing for:
# <orderEntry type="module-library">
# <library name="external">
# ...
# <JAVADOC>
# <root url="jar://$MODULE_DIR$/external-libjavadoc/guava-16.0-javadoc.jar!/" />
# </JAVADOC>
# <SOURCES>
# <root url="jar://$MODULE_DIR$/external-libsources/guava-16.0-sources.jar!/" />
# </SOURCES>
# </library>
# </orderEntry>
def test_idea_external_javadoc_and_sources(self):
def do_check(path):
self._check_javadoc_and_sources(path, 'guava')
self._idea_test(['examples/src/java/com/pants/examples/annotation::'],
check_func=do_check)
def test_idea_on_java_sources(self):
self._idea_test(['testprojects/src/scala/com/pants/testproject/javasources::'])
def test_idea_missing_sources(self):
"""Test what happens if we try to fetch sources from a jar that doesn't have any."""
self._idea_test(['testprojects/src/java/com/pants/testproject/missing_sources'])
def test_idea_on_thriftdeptest(self):
self._idea_test(['testprojects/src/java/com/pants/testproject/thriftdeptest::'])
def test_idea_on_scaladepsonboth(self):
self._idea_test(['testprojects/src/scala/com/pants/testproject/scaladepsonboth::'])
def test_idea_on_maven_layout(self):
def do_check(path):
"""
The contents of the .iml file should have sourceFolder entries that all look like:
<sourceFolder url=".../src/main/java" isTestSource="False"/>
<sourceFolder url=".../src/main/resources" isTestSource="False"/>
<sourceFolder url=".../src/test/java" isTestSource="True"/>
<sourceFolder url=".../src/test/resources" isTestSource="True"/>
...
"""
found_source_content = False
iml_file = os.path.join(path, 'project.iml')
self.assertTrue(os.path.exists(iml_file))
dom = minidom.parse(iml_file)
for sourceFolder in self._get_sourceFolders(dom):
found_source_content = True
url = sourceFolder.getAttribute('url')
is_test_source = sourceFolder.getAttribute('isTestSource')
if url.endswith("src/main/java") or url.endswith("src/main/resources"):
self.assertEquals("False", is_test_source,
msg="wrong test flag: url={url} isTestSource={is_test_source}"
.format(url=url, is_test_source=is_test_source))
elif url.endswith("src/test/java") or url.endswith("src/test/resources"):
self.assertEquals("True", is_test_source,
msg="wrong test flag: url={url} isTestSource={is_test_source}"
.format(url=url, is_test_source=is_test_source))
else:
self.fail("Unexpected sourceContent tag: url={url} isTestSource={is_test_source}"
.format(url=url, is_test_source=is_test_source))
self.assertTrue(found_source_content)
self._idea_test(['testprojects/maven_layout/resource_collision::', '--idea-use-source-root',
'--idea-infer-test-from-siblings',],
check_func=do_check)
def test_idea_exclude_maven_targets(self):
def do_check(path):
"""Expect to see at least these two excludeFolder entries:
<excludeFolder url="file://.../testprojects/maven_layout/protolib-test/target" />
<excludeFolder url="file://.../testprojects/maven_layout/maven_and_pants/target" />
And this source entry:
<sourceFolder url="file://.../testprojects/maven_layout/maven_and_pants/src/main/java"
isTestSource="False" />
"""
found_source_content = False
iml_file = os.path.join(path, 'project.iml')
self.assertTrue(os.path.exists(iml_file))
dom = minidom.parse(iml_file)
for sourceFolder in self._get_sourceFolders(dom):
found_source_content = True
url = sourceFolder.getAttribute('url')
self.assertTrue(url.endswith("testprojects/maven_layout/maven_and_pants/src/main/java"),
msg="Unexpected url={url}".format(url=url))
self.assertEquals("False", sourceFolder.getAttribute('isTestSource'))
self.assertTrue(found_source_content)
expected = ["testprojects/maven_layout/protolib-test/target",
"testprojects/maven_layout/maven_and_pants/target"]
found_exclude_folders = [excludeFolder.getAttribute('url')
for excludeFolder in self._get_excludeFolders(dom)]
for suffix in expected:
found = False
for url in found_exclude_folders:
if url.endswith(suffix):
found = True
break
self.assertTrue(found, msg="suffix {suffix} not found in {foundExcludeFolders}"
.format(suffix=suffix, foundExcludeFolders=found_exclude_folders))
# Test together with --idea-use-source-root because that makes sense in a Maven environment
self._idea_test(['testprojects/maven_layout/maven_and_pants::', '--idea-exclude-maven-target',
'--idea-use-source-root',],
check_func=do_check)
def test_idea_excludeFolders(self):
def assertExpectedInExcludeFolders(path, expected):
iml_file = os.path.join(path, 'project.iml')
self.assertTrue(os.path.exists(iml_file))
dom = minidom.parse(iml_file)
found_exclude_folders = [excludeFolder.getAttribute('url')
for excludeFolder in self._get_excludeFolders(dom)]
for suffix in expected:
found = False
for url in found_exclude_folders:
if url.endswith(suffix):
found = True
break
self.assertTrue(found, msg="suffix {suffix} not found in {foundExcludeFolders}"
.format(suffix=suffix, foundExcludeFolders=found_exclude_folders))
def do_check_default(path):
assertExpectedInExcludeFolders(path, ["/compile", "/ivy", "/python", "/resources"])
def do_check_override(path):
assertExpectedInExcludeFolders(path, ["exclude-folder-sentinel"])
self._idea_test(['examples/src/java/com/pants/examples/hello::'], check_func=do_check_default)
self._idea_test(['examples/src/java/com/pants/examples/hello::'], check_func=do_check_override,
config= {
'idea': {'exclude_folders': ['exclude-folder-sentinel']}
})
def test_all_targets(self):
self._idea_test(['src::', 'tests::', 'examples::', 'testprojects::',
# The android targets won't work if the Android ADK is not installed
'--exclude-target-regexp=.*android.*',])
| |
#!/usr/bin/env python
"""EMR cost calculator
Usage:
emr_cost_calculator.py total --region=<reg> \
--created_after=<ca> --created_before=<cb> \
[--aws_access_key_id=<ai> --aws_secret_access_key=<ak>]
emr_cost_calculator.py cluster --region=<reg> --cluster_id=<ci> \
[--aws_access_key_id=<ai> --aws_secret_access_key=<ak>]
emr_cost_calculator.py -h | --help
Options:
-h --help Show this screen
total Calculate the total EMR cost \
for a period of time
cluster Calculate the cost of single \
cluster given the cluster id
--region=<reg> The aws region that the \
cluster was launched on
--aws_access_key_id=<ai> Self-explanatory
--aws_secret_access_key=<ci> Self-explanatory
--created_after=<ca> The calculator will compute \
the cost for all the cluster created after the created_after day
--created_before=<cb> The calculator will compute \
the cost for all the cluster created before the created_before day
--cluster_id=<ci> The id of the cluster you want to \
calculate the cost for
"""
from docopt import docopt
import boto.emr
from retrying import retry
import sys
import time
import math
import yaml
import datetime
# Load the instance-hour price table once at import time.
# safe_load avoids arbitrary-object construction from the YAML file (the
# Loader-less yaml.load() is unsafe and deprecated), and the context manager
# closes the file handle the original leaked.
with open('config.yml', 'r') as _config_file:
    config = yaml.safe_load(_config_file)
prices = config['prices']
def validate_date(date_text):
    """Parse an ISO YYYY-MM-DD string into a datetime, or raise ValueError."""
    try:
        parsed = datetime.datetime.strptime(date_text, '%Y-%m-%d')
    except ValueError:
        raise ValueError('Incorrect data format, should be YYYY-MM-DD')
    return parsed
def retry_if_EmrResponseError(exception):
    """Retry predicate for @retry: back off only when the EMR API returned an
    error response, never for other exception types."""
    is_emr_error = isinstance(exception, boto.exception.EmrResponseError)
    return is_emr_error
class Ec2Instance:
    """Cost record for a single EC2 instance that belonged to an EMR cluster."""

    def __init__(self, creation_ts, termination_ts, instance_price):
        # Billable lifetime in whole hours, times the hourly price.
        self.lifetime = self._get_lifetime(creation_ts, termination_ts)
        self.cost = self.lifetime * instance_price

    @staticmethod
    def _parse_dates(creation_ts, termination_ts):
        """Convert the two ISO-8601 timestamp strings to epoch seconds.

        :return: (creation_epoch, termination_epoch) pair of floats
        """
        date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
        start = time.mktime(time.strptime(creation_ts, date_format))
        end = time.mktime(time.strptime(termination_ts, date_format))
        return start, end

    def _get_lifetime(self, creation_ts, termination_ts):
        """Billable lifetime in hours, rounded up to the next whole hour."""
        start, end = Ec2Instance._parse_dates(creation_ts, termination_ts)
        return math.ceil((end - start) / 3600)
class InstanceGroup:
    """Lightweight record for one EMR instance group."""

    def __init__(self, group_id, instance_type, group_type):
        self.group_id = group_id            # EMR instance-group id
        self.instance_type = instance_type  # EC2 instance type, e.g. "m1.large"
        self.group_type = group_type        # group role (MASTER/CORE/TASK)
        self.price = 0                      # hourly price; filled in by the caller
class EmrCostCalculator:
def __init__(
    self,
    region,
    aws_access_key_id=None,
    aws_secret_access_key=None
):
    # Open an EMR connection for *region*; credentials fall back to boto's
    # default resolution chain when not supplied.
    try:
        print >> sys.stderr, \
            '[INFO] Retrieving cost in region %s' \
            % (region)
        self.conn = \
            boto.emr.connect_to_region(
                region,
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key
            )
        self.spot_used = False
    # NOTE(review): this bare except swallows every failure and leaves
    # self.conn unset, so later API calls die with AttributeError.
    except:
        print >> sys.stderr, \
            '[ERROR] Could not establish connection with EMR api'
def get_total_cost_by_dates(self, created_after, created_before):
    """Sum the cost of every cluster created inside the given date window."""
    cluster_ids = self._get_cluster_list(created_after, created_before)
    return sum(self.get_cluster_cost(cid)['TOTAL'] for cid in cluster_ids)
@retry(
    wait_exponential_multiplier=1000,
    wait_exponential_max=7000,
    retry_on_exception=retry_if_EmrResponseError
)
def get_cluster_cost(self, cluster_id):
    """
    Joins the information from the instance groups and the instances
    in order to calculate the price of the whole cluster.
    It is important that we use a backoff policy in this case since Amazon
    throttles the number of API requests.
    :return: A dictionary with the total cost of the cluster and the
        individual cost of each instance group (Master, Core, Task)
    """
    instance_groups = self._get_instance_groups(cluster_id)
    cost_dict = {}
    for instance_group in instance_groups:
        for instance in self._get_instances(instance_group, cluster_id):
            # Accumulate per-group-type cost and the grand total.
            cost_dict.setdefault(instance_group.group_type, 0)
            cost_dict[instance_group.group_type] += instance.cost
            cost_dict.setdefault('TOTAL', 0)
            cost_dict['TOTAL'] += instance.cost
    return EmrCostCalculator._sanitise_floats(cost_dict)
@staticmethod
def _sanitise_floats(aDict):
"""
Round the values to 3 decimals.
#Did it this way to avoid
https://docs.python.org/2/tutorial/floatingpoint.html#representation-error
"""
for key in aDict:
aDict[key] = round(aDict[key], 3)
return aDict
def _get_cluster_list(self, created_after, created_before):
    """
    :return: An iterator of cluster ids for the specified dates
    """
    marker = None
    while True:
        # Page through the ListClusters API using the continuation marker.
        cluster_list = \
            self.conn.list_clusters(
                created_after,
                created_before,
                marker=marker
            )
        for cluster in cluster_list.clusters:
            yield cluster.id
        # The final page carries no marker attribute, which ends the loop.
        # NOTE(review): if the API ever repeats the same non-None marker this
        # loops forever — confirm boto's pagination contract.
        try:
            marker = cluster_list.marker
        except AttributeError:
            break
def _get_instance_groups(self, cluster_id):
    """
    Invokes the EMR api and gets a list of the cluster's instance groups.
    :return: List of our custom InstanceGroup objects
    """
    groups = self.conn.list_instance_groups(cluster_id).instancegroups
    instance_groups = []
    for group in groups:
        inst_group = InstanceGroup(
            group.id,
            group.instancetype,
            group.instancegrouptype
        )
        # Spot instances are priced at their bid; on-demand instances at the
        # EC2 rate plus the EMR surcharge from config.yml.
        if group.market == 'SPOT':
            inst_group.price = float(group.bidprice)
        else:
            inst_group.price = prices[group.instancetype]['ec2'] + \
                prices[group.instancetype]['emr']
        instance_groups.append(inst_group)
    return instance_groups
def _get_instances(self, instance_group, cluster_id):
    """
    Invokes the EMR api to retrieve a list of all the instances
    that were used in the cluster.
    This list is then joined to the InstanceGroup list
    on the instance group id
    :return: An iterator of our custom Ec2Instance objects.
    """
    instance_list = []
    marker = None
    while True:
        # Page through ListInstances for this instance group.
        batch = self.conn.list_instances(
            cluster_id,
            instance_group.group_id,
            marker=marker
        )
        instance_list.extend(batch.instances)
        # Last page has no marker attribute -> stop paginating.
        try:
            marker = batch.marker
        except AttributeError:
            break
    for instance_info in instance_list:
        try:
            # Still-running instances have no enddatetime; bill them up to now.
            end_date_time = datetime.datetime \
                .now() \
                .strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            if hasattr(instance_info.status.timeline, 'enddatetime'):
                end_date_time = instance_info.status.timeline.enddatetime
            inst = Ec2Instance(
                instance_info.status.timeline.creationdatetime,
                end_date_time,
                instance_group.price
            )
            yield inst
        except AttributeError as e:
            # Incomplete timeline data: skip the instance but keep going.
            print >> sys.stderr, \
                '[WARN] Error when computing instance cost. Cluster: %s'\
                % cluster_id
            print >> sys.stderr, e
if __name__ == '__main__':
args = docopt(__doc__)
if args.get('total'):
created_after = validate_date(args.get('--created_after'))
created_before = validate_date(args.get('--created_before'))
calc = EmrCostCalculator(
args.get('--region'),
args.get('--aws_access_key_id'),
args.get('--aws_secret_access_key')
)
print calc.get_total_cost_by_dates(created_after, created_before)
elif args.get('cluster'):
calc = EmrCostCalculator(
args.get('--region'),
args.get('--aws_access_key_id'),
args.get('--aws_secret_access_key')
)
print calc.get_cluster_cost(args.get('--cluster_id'))
else:
print >> sys.stderr, \
'[ERROR] Invalid operation, please check usage again'
| |
from django.core.exceptions import ValidationError
from django.test import TestCase
from bestiary import models
class Rune(models.Rune):
    """Abstract test double for bestiary.models.Rune with a stub() factory
    supplying test-friendly defaults."""

    class Meta:
        abstract = True

    @classmethod
    def stub(cls, **kwargs):
        """Build an unsaved rune; any default below can be overridden via kwargs.

        update_fields() refreshes derived fields (e.g. main_stat_value) so the
        stub is internally consistent before clean()/assertions run.
        """
        defaults = {
            'type': Rune.TYPE_ENERGY,
            'stars': 6,
            'level': 0,
            'slot': 2,
            'main_stat': Rune.STAT_HP_PCT,
            'innate_stat': None,
            'innate_stat_value': None,
            'substats': [],
            'substat_values': [],
            'substats_enchanted': [],
            'substats_grind_value': [],
        }
        defaults.update(kwargs)
        rune = cls(**defaults)
        rune.update_fields()
        return rune
class Attributes(TestCase):
def test_stars_too_high_when_cleaning(self):
rune = Rune.stub()
rune.stars = 9
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('stars', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['stars'][0].code, 'stars_invalid')
def test_stars_too_low_when_cleaning(self):
rune = Rune.stub()
rune.stars = 0
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('stars', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['stars'][0].code, 'stars_invalid')
def test_stars_missing_when_cleaning(self):
rune = Rune.stub()
rune.stars = None
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('stars', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['stars'][0].code, 'stars_missing')
def test_level_too_high_when_cleaning(self):
rune = Rune.stub()
rune.level = 20
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('level', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['level'][0].code, 'level_invalid')
def test_level_too_low_when_cleaning(self):
rune = Rune.stub()
rune.level = -1
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('level', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['level'][0].code, 'level_invalid')
def test_level_missing_when_cleaning(self):
rune = Rune.stub()
rune.level = None
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('level', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['level'][0].code, 'level_missing')
def test_slot_too_high_when_cleaning(self):
rune = Rune.stub()
rune.slot = 9
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('slot', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['slot'][0].code, 'slot_invalid')
def test_slot_too_low_when_cleaning(self):
rune = Rune.stub()
rune.slot = 0
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('slot', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['slot'][0].code, 'slot_invalid')
def test_slot_missing_when_cleaning(self):
rune = Rune.stub()
rune.slot = None
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('slot', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['slot'][0].code, 'slot_missing')
def test_level_too_low_when_enchant_applied(self):
rune = Rune.stub(
level=11,
substats=[Rune.STAT_RESIST_PCT, Rune.STAT_ACCURACY_PCT, Rune.STAT_HP, Rune.STAT_ATK],
substat_values=[4, 4, 4, 4],
substats_enchanted=[True, False, False, False],
)
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('level', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['level'][0].code, 'level_invalid')
def test_quality_normal(self):
rune = Rune.stub()
self.assertEqual(rune.quality, Rune.QUALITY_NORMAL)
def test_quality_magic(self):
rune = Rune.stub(
substats=[Rune.STAT_ATK],
substat_values=[4]
)
self.assertEqual(rune.quality, Rune.QUALITY_MAGIC)
def test_quality_rare(self):
    """Two assigned substats should grade as RARE quality."""
    rune = Rune.stub(
        substats=[Rune.STAT_ATK, Rune.STAT_DEF],
        # Fix: the values list had a stray third entry; it must parallel
        # substats one-to-one, as in the magic/hero/legend quality tests.
        substat_values=[4, 4]
    )
    self.assertEqual(rune.quality, Rune.QUALITY_RARE)
def test_quality_hero(self):
rune = Rune.stub(
substats=[Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_ATK_PCT],
substat_values=[4, 4, 4]
)
self.assertEqual(rune.quality, Rune.QUALITY_HERO)
def test_quality_legend(self):
rune = Rune.stub(
substats=[Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_ATK_PCT, Rune.STAT_DEF_PCT],
substat_values=[4, 4, 4, 4]
)
self.assertEqual(rune.quality, Rune.QUALITY_LEGEND)
def test_one_enchant_gem_applied(self):
rune = Rune.stub(
level=12,
substats=[Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_ATK_PCT, Rune.STAT_DEF_PCT],
substat_values=[4, 4, 4, 4],
substats_enchanted=[True, False, False, False],
)
try:
rune.clean()
except ValidationError:
self.fail()
def test_too_many_enchant_gems_applied(self):
rune = Rune.stub(
level=12,
substats=[Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_ATK_PCT, Rune.STAT_DEF_PCT],
substat_values=[4, 4, 4, 4],
substats_enchanted=[True, True, False, False],
)
with self.assertRaises(ValidationError) as cm:
rune.clean()
self.assertIn('substats_enchanted', cm.exception.error_dict)
self.assertEqual(cm.exception.error_dict['substats_enchanted'][0].code, 'too_many_enchants')
def test_does_not_have_enchant_gem_applied(self):
rune = Rune.stub()
self.assertFalse(rune.has_gem)
def test_does_have_enchant_gem_applied(self):
rune = Rune.stub(
level=12,
substats=[Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_ATK_PCT, Rune.STAT_DEF_PCT],
substat_values=[4, 4, 4, 4],
substats_enchanted=[True, False, False, False],
)
self.assertTrue(rune.has_gem)
def test_does_not_have_grind_applied(self):
rune = Rune.stub()
self.assertFalse(rune.has_grind)
def test_does_have_grind_applied(self):
rune = Rune.stub(
level=12,
substats=[Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_ATK_PCT, Rune.STAT_DEF_PCT],
substat_values=[4, 4, 4, 4],
substats_grind_value=[4, 0, 0, 0],
)
self.assertTrue(rune.has_grind)
class Stats(TestCase):
    """Validation and accessor tests for rune stats.

    Covers duplicate-stat detection, main/innate/substat value capping and
    validation codes, per-slot main-stat restrictions, substat count
    requirements by level, the has_* convenience flags, and get_stat()
    (including grind values).
    """

    def test_duplicate_stats_main_and_innate(self):
        rune = Rune.stub(
            main_stat=Rune.STAT_HP_PCT,
            innate_stat=Rune.STAT_HP_PCT,
            innate_stat_value=8,
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertEqual(cm.exception.code, 'duplicate_stats')

    def test_duplicate_stats_main_and_sub(self):
        rune = Rune.stub(
            main_stat=Rune.STAT_HP_PCT,
            substats=[Rune.STAT_HP_PCT],
            substat_values=[8],
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertEqual(cm.exception.code, 'duplicate_stats')

    def test_duplicate_stats_innate_and_sub(self):
        rune = Rune.stub(
            main_stat=Rune.STAT_ATK_PCT,
            innate_stat=Rune.STAT_HP_PCT,
            innate_stat_value=8,
            substats=[Rune.STAT_HP_PCT],
            substat_values=[8],
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertEqual(cm.exception.code, 'duplicate_stats')

    def test_main_stat_value_auto_populated(self):
        rune = Rune.stub()
        self.assertIsNotNone(rune.main_stat_value)
        self.assertGreater(rune.main_stat_value, 0)

    def test_main_stat_value_capped(self):
        rune = Rune.stub(
            main_stat_value=99999,
        )
        self.assertEqual(
            rune.main_stat_value,
            Rune.MAIN_STAT_VALUES[rune.main_stat][rune.stars][rune.level]
        )

    def test_main_stat_value_missing_when_cleaning(self):
        rune = Rune.stub()
        rune.main_stat_value = None  # rune.update_fields() sets main stat value, so reset it here
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('main_stat_value', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['main_stat_value'][0].code, 'main_stat_missing')

    def test_main_stat_value_exception_when_cleaning(self):
        rune = Rune.stub()
        rune.main_stat_value = 99999  # rune.update_fields() caps the value, so override after creation
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('main_stat_value', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['main_stat_value'][0].code, 'main_stat_too_high')

    def test_main_stat_slot_1_invalid_stat(self):
        for invalid_stat in [
            Rune.STAT_HP,
            Rune.STAT_HP_PCT,
            Rune.STAT_ATK_PCT,
            Rune.STAT_DEF,
            Rune.STAT_DEF_PCT,
            Rune.STAT_SPD,
            Rune.STAT_CRIT_RATE_PCT,
            Rune.STAT_CRIT_DMG_PCT,
            Rune.STAT_RESIST_PCT,
            Rune.STAT_ACCURACY_PCT,
        ]:
            rune = Rune.stub(slot=1, main_stat=invalid_stat)
            with self.assertRaises(ValidationError) as cm:
                rune.clean()
            self.assertIn('main_stat', cm.exception.error_dict)
            self.assertEqual(cm.exception.error_dict['main_stat'][0].code, 'invalid_main_stat_for_slot')

    def test_main_stat_slot_2_invalid_stat(self):
        for invalid_stat in [
            Rune.STAT_CRIT_RATE_PCT,
            Rune.STAT_CRIT_DMG_PCT,
            Rune.STAT_RESIST_PCT,
            Rune.STAT_ACCURACY_PCT,
        ]:
            rune = Rune.stub(slot=2, main_stat=invalid_stat)
            with self.assertRaises(ValidationError) as cm:
                rune.clean()
            self.assertIn('main_stat', cm.exception.error_dict)
            self.assertEqual(cm.exception.error_dict['main_stat'][0].code, 'invalid_main_stat_for_slot')

    def test_main_stat_slot_3_invalid_stat(self):
        for invalid_stat in [
            Rune.STAT_HP,
            Rune.STAT_HP_PCT,
            Rune.STAT_ATK,
            Rune.STAT_ATK_PCT,
            Rune.STAT_DEF_PCT,
            Rune.STAT_SPD,
            Rune.STAT_CRIT_RATE_PCT,
            Rune.STAT_CRIT_DMG_PCT,
            Rune.STAT_RESIST_PCT,
            Rune.STAT_ACCURACY_PCT,
        ]:
            rune = Rune.stub(slot=3, main_stat=invalid_stat)
            with self.assertRaises(ValidationError) as cm:
                rune.clean()
            self.assertIn('main_stat', cm.exception.error_dict)
            self.assertEqual(cm.exception.error_dict['main_stat'][0].code, 'invalid_main_stat_for_slot')

    def test_main_stat_slot_4_invalid_stat(self):
        for invalid_stat in [
            Rune.STAT_SPD,
            Rune.STAT_RESIST_PCT,
            Rune.STAT_ACCURACY_PCT,
        ]:
            rune = Rune.stub(slot=4, main_stat=invalid_stat)
            with self.assertRaises(ValidationError) as cm:
                rune.clean()
            self.assertIn('main_stat', cm.exception.error_dict)
            self.assertEqual(cm.exception.error_dict['main_stat'][0].code, 'invalid_main_stat_for_slot')

    def test_main_stat_slot_5_invalid_stat(self):
        for invalid_stat in [
            Rune.STAT_HP_PCT,
            Rune.STAT_ATK,
            Rune.STAT_ATK_PCT,
            Rune.STAT_DEF,
            Rune.STAT_DEF_PCT,
            Rune.STAT_SPD,
            Rune.STAT_CRIT_RATE_PCT,
            Rune.STAT_CRIT_DMG_PCT,
            Rune.STAT_RESIST_PCT,
            Rune.STAT_ACCURACY_PCT,
        ]:
            rune = Rune.stub(slot=5, main_stat=invalid_stat)
            with self.assertRaises(ValidationError) as cm:
                rune.clean()
            self.assertIn('main_stat', cm.exception.error_dict)
            self.assertEqual(cm.exception.error_dict['main_stat'][0].code, 'invalid_main_stat_for_slot')

    def test_main_stat_slot_6_invalid_stat(self):
        for invalid_stat in [
            Rune.STAT_SPD,
            Rune.STAT_CRIT_RATE_PCT,
            Rune.STAT_CRIT_DMG_PCT,
        ]:
            rune = Rune.stub(slot=6, main_stat=invalid_stat)
            with self.assertRaises(ValidationError) as cm:
                rune.clean()
            self.assertIn('main_stat', cm.exception.error_dict)
            self.assertEqual(cm.exception.error_dict['main_stat'][0].code, 'invalid_main_stat_for_slot')

    def test_innate_stat_specified_but_value_missing_when_cleaning(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_HP,
            innate_stat_value=0,
        )
        rune.innate_stat_value = None
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('innate_stat_value', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['innate_stat_value'][0].code, 'innate_stat_missing')

    def test_innate_stat_value_capped(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_HP,
            innate_stat_value=99999,
        )
        self.assertEqual(rune.innate_stat_value, Rune.SUBSTAT_INCREMENTS[Rune.STAT_HP][rune.stars])

    def test_innate_stat_value_too_large_when_cleaning(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_HP,
            innate_stat_value=0,
        )
        rune.innate_stat_value = 999  # rune.update_fields() caps it, so reset it here
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('innate_stat_value', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['innate_stat_value'][0].code, 'innate_stat_too_high')

    def test_innate_stat_value_too_small_when_cleaning(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_HP,
            innate_stat_value=0,
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('innate_stat_value', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['innate_stat_value'][0].code, 'innate_stat_too_low')

    def test_substat_arrays_always_same_length(self):
        """clean() pads the enchant/grind arrays to match the substat count."""
        rune = Rune.stub(
            substats=[
                Rune.STAT_HP,
                Rune.STAT_ATK,
                Rune.STAT_CRIT_DMG_PCT,
            ],
            substat_values=[4, 4, 4]
        )
        rune.clean()
        self.assertEqual(len(rune.substats), 3)
        self.assertEqual(len(rune.substat_values), 3)
        self.assertEqual(len(rune.substats_enchanted), 3)
        self.assertEqual(len(rune.substats_grind_value), 3)

    def test_substat_enchanted_defaults_false(self):
        rune = Rune.stub(
            substats=[
                Rune.STAT_HP,
                Rune.STAT_ATK,
                Rune.STAT_CRIT_DMG_PCT,
            ],
            substat_values=[4, 4, 4]
        )
        rune.clean()
        self.assertFalse(any(rune.substats_enchanted))

    def test_substat_grind_value_defaults_zero(self):
        rune = Rune.stub(
            substats=[
                Rune.STAT_HP,
                Rune.STAT_ATK,
                Rune.STAT_CRIT_DMG_PCT,
            ],
            substat_values=[4, 4, 4]
        )
        rune.clean()
        self.assertEqual(sum(rune.substats_grind_value), 0)

    def test_substat_grind_value_maximum_enforced(self):
        rune = Rune.stub(
            substats=[
                Rune.STAT_HP,
                Rune.STAT_ATK,
                Rune.STAT_CRIT_DMG_PCT,
            ],
            substat_values=[4, 4, 4],
            substats_grind_value=[4, 99, 4],  # middle value exceeds the allowed grind maximum
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('substats_grind_value', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['substats_grind_value'][0].code, 'grind_too_high')

    def test_no_substat_upgrades_received(self):
        rune = Rune.stub(level=0)
        self.assertEqual(rune.substat_upgrades_received, 0)
        self.assertEqual(rune.substat_upgrades_remaining, 4)

    def test_one_substat_upgrades_received(self):
        rune = Rune.stub(level=3)
        self.assertEqual(rune.substat_upgrades_received, 1)
        self.assertEqual(rune.substat_upgrades_remaining, 3)

    def test_two_substat_upgrades_received(self):
        rune = Rune.stub(level=6)
        self.assertEqual(rune.substat_upgrades_received, 2)
        self.assertEqual(rune.substat_upgrades_remaining, 2)

    def test_three_substat_upgrades_received(self):
        rune = Rune.stub(level=9)
        self.assertEqual(rune.substat_upgrades_received, 3)
        self.assertEqual(rune.substat_upgrades_remaining, 1)

    def test_all_substat_upgrades_received(self):
        rune = Rune.stub(level=12)
        self.assertEqual(rune.substat_upgrades_received, 4)
        self.assertEqual(rune.substat_upgrades_remaining, 0)

    def test_lv0_required_number_of_substats(self):
        rune = Rune.stub(level=0)
        try:
            rune.clean()
        except ValidationError:
            self.fail()

    def test_lv3_not_enough_substats(self):
        rune = Rune.stub(level=3, substats=[], substat_values=[])
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('substats', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['substats'][0].code, 'not_enough_substats')

    def test_lv3_enough_substats(self):
        rune = Rune.stub(
            level=3,
            substats=[Rune.STAT_HP],
            substat_values=[4],
        )
        try:
            rune.clean()
        except ValidationError:
            self.fail()

    def test_lv6_not_enough_substats(self):
        rune = Rune.stub(
            level=6,
            substats=[Rune.STAT_HP],
            substat_values=[4]
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('substats', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['substats'][0].code, 'not_enough_substats')

    def test_lv6_enough_substats(self):
        # Fixed: this previously stubbed level=3, so it never exercised the
        # two-substat requirement at level 6 that its name promises.
        rune = Rune.stub(
            level=6,
            substats=[Rune.STAT_HP, Rune.STAT_ACCURACY_PCT],
            substat_values=[4, 4],
        )
        try:
            rune.clean()
        except ValidationError:
            self.fail()

    def test_lv9_not_enough_substats(self):
        rune = Rune.stub(
            level=9,
            substats=[Rune.STAT_HP, Rune.STAT_ACCURACY_PCT],
            substat_values=[4, 4]
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('substats', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['substats'][0].code, 'not_enough_substats')

    def test_lv9_enough_substats(self):
        rune = Rune.stub(
            level=9,
            substats=[Rune.STAT_HP, Rune.STAT_ACCURACY_PCT, Rune.STAT_ATK],
            substat_values=[4, 4, 4],
        )
        try:
            rune.clean()
        except ValidationError:
            self.fail()

    def test_lv12_not_enough_substats(self):
        rune = Rune.stub(
            level=12,
            substats=[Rune.STAT_HP, Rune.STAT_ACCURACY_PCT, Rune.STAT_ATK],
            substat_values=[4, 4, 4]
        )
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('substats', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['substats'][0].code, 'not_enough_substats')

    def test_lv12_enough_substats(self):
        rune = Rune.stub(
            level=12,
            substats=[Rune.STAT_HP, Rune.STAT_ACCURACY_PCT, Rune.STAT_ATK, Rune.STAT_DEF],
            substat_values=[4, 4, 4, 4],
        )
        try:
            rune.clean()
        except ValidationError:
            self.fail()

    def test_substat_values_limited_no_upgrades_received(self):
        rune = Rune.stub(
            level=0,
            stars=6,
            substats=[Rune.STAT_ATK_PCT],
            substat_values=[999]
        )
        self.assertEqual(rune.substat_values[0], Rune.SUBSTAT_INCREMENTS[Rune.STAT_ATK_PCT][rune.stars])

    def test_substat_values_limited_all_upgrades_received(self):
        # Initial roll plus four upgrades = five increments maximum.
        rune = Rune.stub(
            level=12,
            stars=6,
            substats=[Rune.STAT_ATK_PCT, Rune.STAT_ATK, Rune.STAT_DEF, Rune.STAT_DEF_PCT],
            substat_values=[999, 4, 4, 4]
        )
        self.assertEqual(rune.substat_values[0], Rune.SUBSTAT_INCREMENTS[Rune.STAT_ATK_PCT][rune.stars] * 5)

    def test_substat_value_limit_when_cleaning(self):
        rune = Rune.stub(
            level=0,
            stars=6,
            substats=[Rune.STAT_ATK_PCT],
            substat_values=[0],
        )
        rune.substat_values[0] = 999
        with self.assertRaises(ValidationError) as cm:
            rune.clean()
        self.assertIn('substat_values', cm.exception.error_dict)
        self.assertEqual(cm.exception.error_dict['substat_values'][0].code, 'substat_too_high')

    def test_has_hp_flat(self):
        rune = Rune.stub(substats=[Rune.STAT_HP], substat_values=[0])
        self.assertTrue(rune.has_hp)

    def test_has_hp_pct(self):
        rune = Rune.stub(substats=[Rune.STAT_HP_PCT], substat_values=[0])
        self.assertTrue(rune.has_hp)

    def test_has_atk_flat(self):
        rune = Rune.stub(substats=[Rune.STAT_ATK], substat_values=[0])
        self.assertTrue(rune.has_atk)

    def test_has_atk_pct(self):
        rune = Rune.stub(substats=[Rune.STAT_ATK_PCT], substat_values=[0])
        self.assertTrue(rune.has_atk)

    def test_has_def_flat(self):
        rune = Rune.stub(substats=[Rune.STAT_DEF], substat_values=[0])
        self.assertTrue(rune.has_def)

    def test_has_def_pct(self):
        rune = Rune.stub(substats=[Rune.STAT_DEF_PCT], substat_values=[0])
        self.assertTrue(rune.has_def)

    def test_has_crit_rate(self):
        rune = Rune.stub(substats=[Rune.STAT_CRIT_RATE_PCT], substat_values=[0])
        self.assertTrue(rune.has_crit_rate)

    def test_has_crit_dmg(self):
        rune = Rune.stub(substats=[Rune.STAT_CRIT_DMG_PCT], substat_values=[0])
        self.assertTrue(rune.has_crit_dmg)

    def test_has_speed(self):
        rune = Rune.stub(substats=[Rune.STAT_SPD], substat_values=[0])
        self.assertTrue(rune.has_speed)

    def test_has_resistance(self):
        rune = Rune.stub(substats=[Rune.STAT_RESIST_PCT], substat_values=[0])
        self.assertTrue(rune.has_resist)

    def test_has_accuracy(self):
        # Renamed from test_has_accurracy (typo).
        rune = Rune.stub(substats=[Rune.STAT_ACCURACY_PCT], substat_values=[0])
        self.assertTrue(rune.has_accuracy)

    def test_main_stat_sets_has_flag(self):
        rune = Rune.stub(main_stat=Rune.STAT_HP)
        self.assertTrue(rune.has_hp)

    def test_innate_stat_sets_has_flag(self):
        rune = Rune.stub(innate_stat=Rune.STAT_HP, innate_stat_value=8)
        self.assertTrue(rune.has_hp)

    def test_substat_stat_sets_has_flag(self):
        rune = Rune.stub(substats=[Rune.STAT_HP], substat_values=[0])
        self.assertTrue(rune.has_hp)

    def test_get_stat_from_main_stat(self):
        rune = Rune.stub(
            level=0,
            stars=6,
            main_stat=Rune.STAT_HP,
            main_stat_value=Rune.MAIN_STAT_VALUES[Rune.STAT_HP][6][0],
        )
        self.assertEqual(rune.get_stat(Rune.STAT_HP), Rune.MAIN_STAT_VALUES[Rune.STAT_HP][6][0])

    def test_get_stat_from_innate_stat(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_HP,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_HP), 8)

    def test_get_stat_from_substat(self):
        rune = Rune.stub(
            substats=[Rune.STAT_HP],
            substat_values=[8],
        )
        rune.clean()
        self.assertEqual(rune.get_stat(Rune.STAT_HP), 8)

    def test_get_hp_pct(self):
        rune = Rune.stub(
            main_stat=Rune.STAT_ATK_PCT,
            innate_stat=Rune.STAT_HP_PCT,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_HP_PCT), 8)

    def test_get_hp(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_HP,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_HP), 8)

    def test_get_atk_pct(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_ATK_PCT,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_ATK_PCT), 8)

    def test_get_atk(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_ATK,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_ATK), 8)

    def test_get_spd(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_SPD,
            innate_stat_value=6,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_SPD), 6)

    def test_get_cri_rate(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_CRIT_RATE_PCT,
            innate_stat_value=6,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_CRIT_RATE_PCT), 6)

    def test_get_cri_dmg(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_CRIT_DMG_PCT,
            innate_stat_value=7,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_CRIT_DMG_PCT), 7)

    def test_get_res(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_RESIST_PCT,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_RESIST_PCT), 8)

    def test_get_acc(self):
        rune = Rune.stub(
            innate_stat=Rune.STAT_ACCURACY_PCT,
            innate_stat_value=8,
        )
        self.assertEqual(rune.get_stat(Rune.STAT_ACCURACY_PCT), 8)

    def test_get_stat_with_grind_applied(self):
        # get_stat() should include the grindstone bonus: 4 base + 4 grind.
        rune = Rune.stub(
            substats=[Rune.STAT_ATK],
            substat_values=[4],
            substats_grind_value=[4],
        )
        self.assertEqual(rune.get_stat(Rune.STAT_ATK), 8)
class Efficiency(TestCase):
    """Checks rune.efficiency and rune.max_efficiency across star/level combinations."""

    def test_efficiencies_star_6_level_15(self):
        """Baseline test of max-level rune"""
        rune = Rune.stub(level=15)
        self.assertAlmostEqual(
            1.0 / 2.8 * 100,
            rune.efficiency,
        )
        # no upgrades left
        self.assertAlmostEqual(
            1.0 / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_6_level_12(self):
        """No upgrades are left so this should match the max level test"""
        rune = Rune.stub(level=12)
        self.assertAlmostEqual(
            1.0 / 2.8 * 100,
            rune.efficiency,
        )
        # no upgrades left
        self.assertAlmostEqual(
            1.0 / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_6_level_0(self):
        rune = Rune.stub(level=0)
        self.assertAlmostEqual(
            1.0 / 2.8 * 100,
            rune.efficiency,
        )
        # four max upgrades (as 6*, at 100% of 20%)
        self.assertAlmostEqual(
            (1.0 + 4 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_5_level_0_existing(self):
        rune = Rune.stub(
            level=0,
            stars=5,
            substats=[
                Rune.STAT_HP_PCT,
                Rune.STAT_ATK_PCT,
                Rune.STAT_DEF_PCT,
                # efficiency is always worse than % of base stats
                Rune.STAT_HP,
            ],
            substat_values=[0, 0, 0, 0],
        )
        self.assertAlmostEqual(
            (51 / 63) / 2.8 * 100,
            rune.efficiency,
        )
        # four base% upgrades (as 5*, at 87.5% of 20%)
        self.assertAlmostEqual(
            (51 / 63 + 4 * 0.875 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_5_level_6_new_base(self):
        # Renamed from ..._level_0_new_base: the stub is actually level 6
        # (two upgrades remaining), matching the assertions below.
        rune = Rune.stub(level=6, stars=5)
        self.assertAlmostEqual(
            (51 / 63) / 2.8 * 100,
            rune.efficiency,
        )
        # two base % upgrades (as 5*, at 87.5% of 20%)
        self.assertAlmostEqual(
            (51 / 63 + 2 * 0.875 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_4_level_0_existing(self):
        rune = Rune.stub(
            level=0,
            stars=4,
            substats=[
                Rune.STAT_HP_PCT,
                Rune.STAT_ATK_PCT,
                Rune.STAT_DEF_PCT,
                # efficiency is always worse than % of base stats
                Rune.STAT_HP,
            ],
            substat_values=[0, 0, 0, 0],
        )
        self.assertAlmostEqual(
            (43 / 63) / 2.8 * 100,
            rune.efficiency,
        )
        # four base% upgrades (as 4*, at 75% of 20%)
        self.assertAlmostEqual(
            (43 / 63 + 4 * 0.75 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_4_level_6_new_base(self):
        """Two upgrades left; a 4* should get the (max) base % upgrades"""
        rune = Rune.stub(level=6, stars=4)
        self.assertAlmostEqual(
            (43 / 63) / 2.8 * 100,
            rune.efficiency,
        )
        # two base% upgrades (as 4*, at 75% of 20%)
        self.assertAlmostEqual(
            (43 / 63 + 2 * 0.75 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_4_level_9_new_base(self):
        rune = Rune.stub(
            level=9,
            stars=4,
            # seed with base% substats; the single remaining upgrade lands on a
            # 75%-scale stat (base %, ACC and RES share that scale)
            substats=[
                Rune.STAT_HP_PCT,
                Rune.STAT_ATK_PCT,
                Rune.STAT_DEF_PCT,
            ],
            substat_values=[0, 0, 0],
        )
        self.assertAlmostEqual(
            (43 / 63) / 2.8 * 100,
            rune.efficiency,
        )
        # base %, ACC, and RES are all on the same scale so we'll always get a 75% this way
        self.assertAlmostEqual(
            (43 / 63 + 1 * 0.75 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiencies_star_4_level_0_existing_cr(self):
        # Renamed from ..._level_9_existing_cr: the stub is level 0 (four
        # upgrades remaining), matching the assertions below.
        rune = Rune.stub(
            level=0,
            stars=4,
            # seed an existing CRIT RATE substat so one upgrade lands on it
            substats=[
                Rune.STAT_CRIT_RATE_PCT,
            ],
            substat_values=[0],
        )
        self.assertAlmostEqual(
            (43 / 63) / 2.8 * 100,
            rune.efficiency,
        )
        # one at CR and 3x at BASE %
        self.assertAlmostEqual(
            (43 / 63 + 1 * 4/6 * 0.2 + 3 * 0.75 * 0.2) / 2.8 * 100,
            rune.max_efficiency,
        )

    def test_efficiency_over_100_with_grinds(self):
        rune = Rune.stub(
            level=15,
            stars=6,
            innate_stat=Rune.STAT_ACCURACY_PCT,
            innate_stat_value=8,
            substats=[
                Rune.STAT_SPD,
                Rune.STAT_ATK_PCT,
                Rune.STAT_DEF_PCT,
                Rune.STAT_RESIST_PCT,
            ],
            substat_values=[30, 8, 8, 8],  # All upgrades into SPD
            substats_grind_value=[3, 0, 0, 0],
        )
        self.assertAlmostEqual(
            (63 / 63 + 1 * (30 + 3) / 6 * 0.2 + 4 * 1 * 0.2) / 2.8 * 100,
            rune.efficiency
        )
| |
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request, url_for, Markup
from flask.ext.login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db, login_manager
class Permission:
    """Bit-flag permission constants; a Role's permissions column is an OR of these."""
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80
class Role(db.Model):
    """A named permission set; exactly one role is flagged as the default for new users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles. Safe to run repeatedly."""
        role_definitions = {
            'User': (
                Permission.FOLLOW |
                Permission.COMMENT |
                Permission.WRITE_ARTICLES,
                True,
            ),
            'Moderator': (
                Permission.FOLLOW |
                Permission.COMMENT |
                Permission.WRITE_ARTICLES |
                Permission.MODERATE_COMMENTS,
                False,
            ),
            'Administrator': (0xff, False),
        }
        for role_name, (permissions, is_default) in role_definitions.items():
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
            role.permissions = permissions
            role.default = is_default
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class Follow(db.Model):
    """Association object for the self-referential follower/followed relation.

    The composite primary key (follower_id, followed_id) prevents duplicate
    follow edges; timestamp records when the follow was created.
    """
    __tablename__ = 'follows'
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
    """Registered account: credentials, profile data, the follow graph and content."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 of the email, used to build Gravatar URLs without re-hashing.
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    followed = db.relationship('Follow',
                               foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    followers = db.relationship('Follow',
                                foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        """Populate the users table with random fake accounts (dev/test helper)."""
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py

        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # Random email/username collided with an existing row; skip it.
                db.session.rollback()

    @staticmethod
    def add_self_follows():
        """Backfill: make every existing user follow themselves (see __init__)."""
        for user in User.query.all():
            if not user.is_following(user):
                user.follow(user)
                db.session.add(user)
                db.session.commit()

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        if self.role is None:
            # The configured admin address gets the full-permission role;
            # everyone else gets the role flagged as default.
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        # Users follow themselves so their own posts appear in followed_posts.
        self.followed.append(Follow(followed=self))

    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed token used to confirm this account's email address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate an email-confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            # Invalid or expired token (was a bare except:, which also
            # swallowed SystemExit/KeyboardInterrupt).
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed token authorizing a password reset for this user."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Validate a reset token and, if valid for this user, set the new password."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token authorizing a change to new_email."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        """Validate an email-change token and apply the new address if unused."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # Keep the cached Gravatar hash in sync with the new address.
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def can(self, permissions):
        """True if this user's role grants every bit in the permissions mask."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def ping(self):
        """Refresh last_seen; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar image URL matching the current request's scheme."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        # Renamed from `hash`, which shadowed the builtin.
        digest = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=digest, size=size, default=default, rating=rating)

    def follow(self, user):
        if not self.is_following(user):
            f = Follow(follower=self, followed=user)
            db.session.add(f)

    def unfollow(self, user):
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)

    def is_following(self, user):
        return self.followed.filter_by(
            followed_id=user.id).first() is not None

    def is_followed_by(self, user):
        return self.followers.filter_by(
            follower_id=user.id).first() is not None

    @property
    def followed_posts(self):
        """Posts authored by users this user follows (includes self via self-follow)."""
        return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
            .filter(Follow.follower_id == self.id)

    def to_json(self):
        """Serialize the public representation of this user for the API."""
        json_user = {
            # Fixed: previously pointed at 'api.get_post', which generated a
            # post URL (and post id) for the user resource.
            'url': url_for('api.get_user', id=self.id, _external=True),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts': url_for('api.get_user_posts', id=self.id, _external=True),
            'followed_posts': url_for('api.get_user_followed_posts',
                                      id=self.id, _external=True),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        """Return a signed API token encoding this user's id."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid API token, or None if invalid/expired."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors: no permissions, never an admin.

    Lets views call current_user.can()/is_administrator() without first
    checking whether the user is logged in.
    """
    def can(self, permissions):
        return False

    def is_administrator(self):
        return False
# Make Flask-Login use our permission-aware anonymous user class.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user from the unicode id stored in the session."""
    return User.query.get(int(user_id))
class Post(db.Model):
    """A blog post; body is markdown source, body_html the rendered cache."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        """Populate the posts table with random posts by random existing users."""
        from random import seed, randint
        import forgery_py

        seed()
        user_count = User.query.count()
        if user_count == 0:
            # Fixed: randint(0, -1) raised ValueError when no users existed.
            return
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: re-render body_html whenever body changes.

        NOTE(security): the rendered markdown is NOT sanitized here (the
        previous bleach.clean/linkify pass was disabled), so raw HTML in a
        post body reaches the page. Re-enable sanitization if post authors
        are not fully trusted.
        """
        target.body_html = Markup(markdown(value, output_format='html'))

    def to_json(self):
        """Serialize the public representation of this post for the API."""
        json_post = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
            'comments': url_for('api.get_post_comments', id=self.id,
                                _external=True),
            'comment_count': self.comments.count()
        }
        return json_post

    @staticmethod
    def from_json(json_post):
        """Build a Post from an API payload; raises ValidationError on empty body."""
        body = json_post.get('body')
        if body is None or body == '':
            raise ValidationError('post does not have a body')
        return Post(body=body)
# Re-render the HTML cache whenever a post's markdown body is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Comment(db.Model):
    """A comment on a post; body is markdown source, body_html the rendered cache."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # Set by moderators to hide a comment without deleting it.
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render markdown to sanitized HTML.

        Unlike Post, comment HTML is bleach-cleaned to a small inline tag set.
        """
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

    def to_json(self):
        """Serialize the public representation of this comment for the API."""
        json_comment = {
            'url': url_for('api.get_comment', id=self.id, _external=True),
            'post': url_for('api.get_post', id=self.post_id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
        }
        return json_comment

    @staticmethod
    def from_json(json_comment):
        """Build a Comment from an API payload; raises ValidationError on empty body."""
        body = json_comment.get('body')
        if body is None or body == '':
            raise ValidationError('comment does not have a body')
        return Comment(body=body)
# Re-render (and sanitize) the HTML cache whenever a comment's body is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
| |
from __future__ import unicode_literals
from django.utils import six
from channels import Channel
from channels.tests import ChannelTestCase
from channels.handler import AsgiRequest
from channels.exceptions import RequestTimeout, RequestAborted
class RequestTests(ChannelTestCase):
"""
Tests that ASGI request handling correctly decodes HTTP requests.
"""
def test_basic(self):
    """
    Tests that the handler can decode the most basic request message,
    with all optional fields omitted.
    """
    Channel("test").send({
        "reply_channel": "test-reply",
        "http_version": "1.1",
        "method": "GET",
        "path": "/test/",
    }, immediately=True)
    request = AsgiRequest(self.get_next_message("test"))
    self.assertEqual(request.path, "/test/")
    self.assertEqual(request.method, "GET")
    self.assertFalse(request.body)
    # With no headers/client/server in the message, none of the derived
    # META keys should be populated.
    self.assertNotIn("HTTP_HOST", request.META)
    self.assertNotIn("REMOTE_ADDR", request.META)
    self.assertNotIn("REMOTE_HOST", request.META)
    self.assertNotIn("REMOTE_PORT", request.META)
    self.assertNotIn("SERVER_NAME", request.META)
    self.assertNotIn("SERVER_PORT", request.META)
    self.assertFalse(request.GET)
    self.assertFalse(request.POST)
    self.assertFalse(request.COOKIES)
def test_extended(self):
    """
    Tests a more fully-featured GET request
    """
    Channel("test").send({
        "reply_channel": "test",
        "http_version": "1.1",
        "method": "GET",
        "path": "/test2/",
        "query_string": b"x=1&y=%26foo+bar%2Bbaz",  # percent-encoded '&', ' ', '+'
        "headers": {
            "host": b"example.com",
            "cookie": b"test-time=1448995585123; test-value=yeah",
        },
        "client": ["10.0.0.1", 1234],
        "server": ["10.0.0.2", 80],
    }, immediately=True)
    request = AsgiRequest(self.get_next_message("test"))
    self.assertEqual(request.path, "/test2/")
    self.assertEqual(request.method, "GET")
    self.assertFalse(request.body)
    self.assertEqual(request.META["HTTP_HOST"], "example.com")
    self.assertEqual(request.META["REMOTE_ADDR"], "10.0.0.1")
    self.assertEqual(request.META["REMOTE_HOST"], "10.0.0.1")
    self.assertEqual(request.META["REMOTE_PORT"], 1234)
    self.assertEqual(request.META["SERVER_NAME"], "10.0.0.2")
    self.assertEqual(request.META["SERVER_PORT"], "80")  # note: string, unlike REMOTE_PORT
    self.assertEqual(request.GET["x"], "1")
    self.assertEqual(request.GET["y"], "&foo bar+baz")  # decoded query value
    self.assertEqual(request.COOKIES["test-time"], "1448995585123")
    self.assertEqual(request.COOKIES["test-value"], "yeah")
    self.assertFalse(request.POST)
def test_post_single(self):
    """
    Tests a POST body contained within a single message.
    """
    Channel("test").send({
        "reply_channel": "test",
        "http_version": "1.1",
        "method": "POST",
        "path": "/test2/",
        # NOTE(review): passed as str here, while other tests use bytes —
        # presumably the handler accepts both; confirm against AsgiRequest.
        "query_string": "django=great",
        "body": b"ponies=are+awesome",
        "headers": {
            "host": b"example.com",
            "content-type": b"application/x-www-form-urlencoded",
            "content-length": b"18",
        },
    }, immediately=True)
    request = AsgiRequest(self.get_next_message("test"))
    self.assertEqual(request.path, "/test2/")
    self.assertEqual(request.method, "POST")
    self.assertEqual(request.body, b"ponies=are+awesome")
    self.assertEqual(request.META["HTTP_HOST"], "example.com")
    self.assertEqual(request.META["CONTENT_TYPE"], "application/x-www-form-urlencoded")
    self.assertEqual(request.GET["django"], "great")
    self.assertEqual(request.POST["ponies"], "are awesome")
    # GET and POST data must not bleed into each other.
    with self.assertRaises(KeyError):
        request.POST["django"]
    with self.assertRaises(KeyError):
        request.GET["ponies"]
def test_post_multiple(self):
"""
Tests a POST body across multiple messages (first part in 'body').
"""
Channel("test").send({
"reply_channel": "test",
"http_version": "1.1",
"method": "POST",
"path": "/test/",
"body": b"there_a",
"body_channel": "test-input",
"headers": {
"host": b"example.com",
"content-type": b"application/x-www-form-urlencoded",
"content-length": b"21",
},
}, immediately=True)
Channel("test-input").send({
"content": b"re=fou",
"more_content": True,
}, immediately=True)
Channel("test-input").send({
"content": b"r+lights",
}, immediately=True)
request = AsgiRequest(self.get_next_message("test"))
self.assertEqual(request.method, "POST")
self.assertEqual(request.body, b"there_are=four+lights")
self.assertEqual(request.META["CONTENT_TYPE"], "application/x-www-form-urlencoded")
self.assertEqual(request.POST["there_are"], "four lights")
def test_post_files(self):
"""
Tests POSTing files using multipart form data and multiple messages,
with no body in the initial message.
"""
body = (
b'--BOUNDARY\r\n' +
b'Content-Disposition: form-data; name="title"\r\n\r\n' +
b'My First Book\r\n' +
b'--BOUNDARY\r\n' +
b'Content-Disposition: form-data; name="pdf"; filename="book.pdf"\r\n\r\n' +
b'FAKEPDFBYTESGOHERE' +
b'--BOUNDARY--'
)
Channel("test").send({
"reply_channel": "test",
"http_version": "1.1",
"method": "POST",
"path": "/test/",
"body_channel": "test-input",
"headers": {
"content-type": b"multipart/form-data; boundary=BOUNDARY",
"content-length": six.text_type(len(body)).encode("ascii"),
},
}, immediately=True)
Channel("test-input").send({
"content": body[:20],
"more_content": True,
}, immediately=True)
Channel("test-input").send({
"content": body[20:],
}, immediately=True)
request = AsgiRequest(self.get_next_message("test"))
self.assertEqual(request.method, "POST")
self.assertEqual(len(request.body), len(body))
self.assertTrue(request.META["CONTENT_TYPE"].startswith("multipart/form-data"))
self.assertFalse(request._post_parse_error)
self.assertEqual(request.POST["title"], "My First Book")
self.assertEqual(request.FILES["pdf"].read(), b"FAKEPDFBYTESGOHERE")
def test_stream(self):
"""
Tests the body stream is emulated correctly.
"""
Channel("test").send({
"reply_channel": "test",
"http_version": "1.1",
"method": "PUT",
"path": "/",
"body": b"onetwothree",
"headers": {
"host": b"example.com",
"content-length": b"11",
},
}, immediately=True)
request = AsgiRequest(self.get_next_message("test", require=True))
self.assertEqual(request.method, "PUT")
self.assertEqual(request.read(3), b"one")
self.assertEqual(request.read(), b"twothree")
def test_request_timeout(self):
"""
Tests that the code correctly gives up after the request body read timeout.
"""
Channel("test").send({
"reply_channel": "test",
"http_version": "1.1",
"method": "POST",
"path": "/test/",
"body": b"there_a",
"body_channel": "test-input",
"headers": {
"host": b"example.com",
"content-type": b"application/x-www-form-urlencoded",
"content-length": b"21",
},
}, immediately=True)
# Say there's more content, but never provide it! Muahahaha!
Channel("test-input").send({
"content": b"re=fou",
"more_content": True,
}, immediately=True)
class VeryImpatientRequest(AsgiRequest):
body_receive_timeout = 0
with self.assertRaises(RequestTimeout):
VeryImpatientRequest(self.get_next_message("test"))
def test_request_abort(self):
"""
Tests that the code aborts when a request-body close is sent.
"""
Channel("test").send({
"reply_channel": "test",
"http_version": "1.1",
"method": "POST",
"path": "/test/",
"body": b"there_a",
"body_channel": "test-input",
"headers": {
"host": b"example.com",
"content-type": b"application/x-www-form-urlencoded",
"content-length": b"21",
},
}, immediately=True)
Channel("test-input").send({
"closed": True,
}, immediately=True)
with self.assertRaises(RequestAborted):
AsgiRequest(self.get_next_message("test"))
| |
# Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The contents of this file are mainly copied from cm_api sources,
# released by Cloudera. Codes not used by Sahara CDH plugin are removed.
# You can find the original codes at
#
# https://github.com/cloudera/cm_api/tree/master/python/src/cm_api
#
# To satisfy the pep8 and python3 tests, we did some changes to the codes.
# We also change some importings to use Sahara inherited classes.
from oslo_serialization import jsonutils as json
import six
from sahara.plugins.cdh.client import role_config_groups
from sahara.plugins.cdh.client import roles
from sahara.plugins.cdh.client import types
# URL templates for CM API service endpoints; filled with the cluster name
# (and, for SERVICE_PATH, the service name).
SERVICES_PATH = "/clusters/%s/services"
SERVICE_PATH = "/clusters/%s/services/%s"
# JSON key under which per-role-type configurations are nested in an
# ApiServiceConfig payload (see ApiService._parse_svc_config).
ROLETYPES_CFG_KEY = 'roleTypeConfigs'
def create_service(resource_root, name, service_type,
                   cluster_name="default"):
    """Create a service
    :param resource_root: The root Resource object.
    :param name: Service name
    :param service_type: Service type
    :param cluster_name: Cluster name
    :return: An ApiService object
    """
    service = ApiService(resource_root, name, service_type)
    path = SERVICES_PATH % (cluster_name,)
    # POST returns a list of created services; we created exactly one.
    return types.call(resource_root.post, path,
                      ApiService, True, data=[service])[0]
def get_service(resource_root, name, cluster_name="default"):
    """Lookup a service by name
    :param resource_root: The root Resource object.
    :param name: Service name
    :param cluster_name: Cluster name
    :return: An ApiService object
    """
    path = "%s/%s" % (SERVICES_PATH % (cluster_name,), name)
    return _get_service(resource_root, path)
def _get_service(resource_root, path):
    # Fetch a single ApiService from the given, fully-built API path.
    return types.call(resource_root.get, path, ApiService)
def get_all_services(resource_root, cluster_name="default", view=None):
    """Get all services
    :param resource_root: The root Resource object.
    :param cluster_name: Cluster name
    :param view: Optional view to materialize ('full' or 'summary')
    :return: A list of ApiService objects.
    """
    params = dict(view=view) if view else None
    return types.call(resource_root.get, SERVICES_PATH % (cluster_name,),
                      ApiService, True, params=params)
def delete_service(resource_root, name, cluster_name="default"):
    """Delete a service by name
    :param resource_root: The root Resource object.
    :param name: Service name
    :param cluster_name: Cluster name
    :return: The deleted ApiService object
    """
    path = "%s/%s" % (SERVICES_PATH % (cluster_name,), name)
    return types.call(resource_root.delete, path, ApiService)
class ApiService(types.BaseApiResource):
    """A Cloudera Manager service (e.g. HDFS, YARN, Oozie, Hive) and the
    CM REST API commands that can be issued against it.
    """
    # Attribute keys mirror the JSON field names of the ApiService payload.
    # ROAttr entries are read-only values reported by Cloudera Manager.
    _ATTRIBUTES = {
        'name': None,
        'type': None,
        'displayName': None,
        'serviceState': types.ROAttr(),
        'healthSummary': types.ROAttr(),
        'healthChecks': types.ROAttr(),
        'clusterRef': types.ROAttr(types.ApiClusterRef),
        'configStale': types.ROAttr(),
        'configStalenessStatus': types.ROAttr(),
        'clientConfigStalenessStatus': types.ROAttr(),
        'serviceUrl': types.ROAttr(),
        'maintenanceMode': types.ROAttr(),
        'maintenanceOwners': types.ROAttr(),
    }
    def __init__(self, resource_root, name=None, type=None):
        # "type" intentionally shadows the builtin: the JSON key is "type",
        # and BaseApiObject.init maps constructor locals() onto attributes.
        types.BaseApiObject.init(self, resource_root, locals())
    def __str__(self):
        return ("<ApiService>: %s (cluster: %s)"
                % (self.name, self._get_cluster_name()))
    def _get_cluster_name(self):
        # clusterRef is absent/empty for the Cloudera Management Service.
        if hasattr(self, 'clusterRef') and self.clusterRef:
            return self.clusterRef.clusterName
        return None
    def _path(self):
        """Return the API path for this service
        This method assumes that lack of a cluster reference means that the
        object refers to the Cloudera Management Services instance.
        """
        if self._get_cluster_name():
            return SERVICE_PATH % (self._get_cluster_name(), self.name)
        else:
            return '/cm/service'
    def _role_cmd(self, cmd, roles, api_version=1):
        # Issue a role-level command against a list of role names.
        # NOTE: the "roles" parameter shadows the module-level import of
        # the same name within this method.
        return self._post("roleCommands/" + cmd, types.ApiBulkCommandList,
                          data=roles, api_version=api_version)
    def _parse_svc_config(self, json_dic, view=None):
        """Parse a json-decoded ApiServiceConfig dictionary into a 2-tuple
        :param json_dic: The json dictionary with the config data.
        :param view: View to materialize.
        :return: 2-tuple (service config dictionary, role type configurations)
        """
        svc_config = types.json_to_config(json_dic, view == 'full')
        rt_configs = {}
        # Role-type configs are nested under ROLETYPES_CFG_KEY, keyed by
        # role type name.
        if ROLETYPES_CFG_KEY in json_dic:
            for rt_config in json_dic[ROLETYPES_CFG_KEY]:
                rt_configs[rt_config['roleType']] = types.json_to_config(
                    rt_config, view == 'full')
        return (svc_config, rt_configs)
    def create_yarn_job_history_dir(self):
        """Create the Yarn job history directory
        :return: Reference to submitted command.
        :since: API v6
        """
        return self._cmd('yarnCreateJobHistoryDirCommand', api_version=6)
    def get_config(self, view=None):
        """Retrieve the service's configuration
        Retrieves both the service configuration and role type configuration
        for each of the service's supported role types. The role type
        configurations are returned as a dictionary, whose keys are the
        role type name, and values are the respective configuration
        dictionaries.
        The 'summary' view contains strings as the dictionary values. The full
        view contains types.ApiConfig instances as the values.
        :param view: View to materialize ('full' or 'summary')
        :return: 2-tuple (service config dictionary, role type configurations)
        """
        path = self._path() + '/config'
        resp = self._get_resource_root().get(
            path, params=(dict(view=view) if view else None))
        return self._parse_svc_config(resp, view)
    def update_config(self, svc_config, **rt_configs):
        """Update the service's configuration
        :param svc_config: Dictionary with service configuration to update.
        :param rt_configs: Dict of role type configurations to update.
        :return: 2-tuple (service config dictionary, role type configurations)
        """
        path = self._path() + '/config'
        if svc_config:
            data = types.config_to_api_list(svc_config)
        else:
            data = {}
        # Role-type configs are appended under ROLETYPES_CFG_KEY, each
        # tagged with its roleType name.
        if rt_configs:
            rt_list = []
            for rt, cfg in six.iteritems(rt_configs):
                rt_data = types.config_to_api_list(cfg)
                rt_data['roleType'] = rt
                rt_list.append(rt_data)
            data[ROLETYPES_CFG_KEY] = rt_list
        resp = self._get_resource_root().put(path, data=json.dumps(data))
        return self._parse_svc_config(resp)
    def create_role(self, role_name, role_type, host_id):
        """Create a role
        :param role_name: Role name
        :param role_type: Role type
        :param host_id: ID of the host to assign the role to
        :return: An ApiRole object
        """
        return roles.create_role(self._get_resource_root(), self.name,
                                 role_type, role_name, host_id,
                                 self._get_cluster_name())
    def delete_role(self, name):
        """Delete a role by name
        :param name: Role name
        :return: The deleted ApiRole object
        """
        return roles.delete_role(self._get_resource_root(), self.name, name,
                                 self._get_cluster_name())
    def get_roles_by_type(self, role_type, view=None):
        """Get all roles of a certain type in a service
        :param role_type: Role type
        :param view: View to materialize ('full' or 'summary')
        :return: A list of ApiRole objects.
        """
        return roles.get_roles_by_type(self._get_resource_root(), self.name,
                                       role_type, self._get_cluster_name(),
                                       view)
    def get_all_role_config_groups(self):
        """Get a list of role configuration groups in the service
        :return: A list of ApiRoleConfigGroup objects.
        :since: API v3
        """
        return role_config_groups.get_all_role_config_groups(
            self._get_resource_root(), self.name, self._get_cluster_name())
    def start(self):
        """Start a service
        :return: Reference to the submitted command.
        """
        return self._cmd('start')
    def stop(self):
        """Stop a service
        :return: Reference to the submitted command.
        """
        return self._cmd('stop')
    def restart(self):
        """Restart a service
        :return: Reference to the submitted command.
        """
        return self._cmd('restart')
    def get_health_summary(self):
        # Read-only attribute populated by CM; None if never fetched.
        return getattr(self, 'healthSummary', None)
    def get_health_checks_status(self):
        # Read-only attribute populated by CM; None if never fetched.
        return getattr(self, 'healthChecks', None)
    def start_roles(self, *role_names):
        """Start a list of roles
        :param role_names: names of the roles to start.
        :return: List of submitted commands.
        """
        return self._role_cmd('start', role_names)
    def create_hbase_root(self):
        """Create the root directory of an HBase service
        :return: Reference to the submitted command.
        """
        return self._cmd('hbaseCreateRoot')
    def create_hdfs_tmp(self):
        """Create /tmp directory in HDFS
        Create the /tmp directory in HDFS with appropriate ownership and
        permissions.
        :return: Reference to the submitted command
        :since: API v2
        """
        return self._cmd('hdfsCreateTmpDir')
    def refresh(self, *role_names):
        """Execute the "refresh" command on a set of roles
        :param role_names: Names of the roles to refresh.
        :return: Reference to the submitted command.
        """
        return self._role_cmd('refresh', role_names)
    def decommission(self, *role_names):
        """Decommission roles in a service
        :param role_names: Names of the roles to decommission.
        :return: Reference to the submitted command.
        """
        return self._cmd('decommission', data=role_names)
    def deploy_client_config(self, *role_names):
        """Deploys client configuration to the hosts where roles are running
        :param role_names: Names of the roles to decommission.
        :return: Reference to the submitted command.
        """
        return self._cmd('deployClientConfig', data=role_names)
    def format_hdfs(self, *namenodes):
        """Format NameNode instances of an HDFS service
        :param namenodes: Name of NameNode instances to format.
        :return: List of submitted commands.
        """
        return self._role_cmd('hdfsFormat', namenodes)
    def install_oozie_sharelib(self):
        """Installs the Oozie ShareLib
        Oozie must be stopped before running this command.
        :return: Reference to the submitted command.
        :since: API v3
        """
        return self._cmd('installOozieShareLib', api_version=3)
    def create_oozie_db(self):
        """Creates the Oozie Database Schema in the configured database
        :return: Reference to the submitted command.
        :since: API v2
        """
        return self._cmd('createOozieDb', api_version=2)
    def upgrade_oozie_db(self):
        """Upgrade Oozie Database schema as part of a major version upgrade
        :return: Reference to the submitted command.
        :since: API v6
        """
        return self._cmd('oozieUpgradeDb', api_version=6)
    def create_hive_metastore_tables(self):
        """Creates the Hive metastore tables in the configured database
        Will do nothing if tables already exist. Will not perform an upgrade.
        :return: Reference to the submitted command.
        :since: API v3
        """
        return self._cmd('hiveCreateMetastoreDatabaseTables', api_version=3)
    def create_hive_warehouse(self):
        """Creates the Hive warehouse directory in HDFS
        :return: Reference to the submitted command.
        :since: API v3
        """
        return self._cmd('hiveCreateHiveWarehouse')
    def create_hive_userdir(self):
        """Creates the Hive user directory in HDFS
        :return: Reference to the submitted command.
        :since: API v4
        """
        return self._cmd('hiveCreateHiveUserDir')
    def enable_nn_ha(self, active_name, standby_host_id, nameservice, jns,
                     standby_name_dir_list=None, qj_name=None,
                     standby_name=None, active_fc_name=None,
                     standby_fc_name=None, zk_service_name=None,
                     force_init_znode=True,
                     clear_existing_standby_name_dirs=True,
                     clear_existing_jn_edits_dir=True):
        """Enable High Availability (HA) with Auto-Failover for HDFS NameNode
        @param active_name: Name of Active NameNode.
        @param standby_host_id: ID of host where Standby NameNode will be
                                created.
        @param nameservice: Nameservice to be used while enabling HA.
                            Optional if Active NameNode already has this
                            config set.
        @param jns: List of Journal Nodes to be created during the command.
                    Each element of the list must be a dict containing the
                    following items:
                    - jns['jnHostId']: ID of the host where the new JournalNode
                                       will be created.
                    - jns['jnName']: Name of the JournalNode role (optional)
                    - jns['jnEditsDir']: Edits dir of the JournalNode. Can be
                                         omitted if the config is already set
                                         at RCG level.
        @param standby_name_dir_list: List of directories for the new Standby
                                      NameNode. If not provided then it will
                                      use same dirs as Active NameNode.
        @param qj_name: Name of the journal located on each JournalNodes'
                        filesystem. This can be optionally provided if the
                        config hasn't been already set for the Active NameNode.
                        If this isn't provided and Active NameNode doesn't
                        also have the config, then nameservice is used by
                        default.
        @param standby_name: Name of the Standby NameNode role to be created
                             (Optional).
        @param active_fc_name: Name of the Active Failover Controller role to
                               be created (Optional).
        @param standby_fc_name: Name of the Standby Failover Controller role to
                                be created (Optional).
        @param zk_service_name: Name of the ZooKeeper service to use for auto-
                                failover. If HDFS service already depends on a
                                ZooKeeper service then that ZooKeeper service
                                will be used for auto-failover and in that case
                                this parameter can either be omitted or should
                                be the same ZooKeeper service.
        @param force_init_znode: Indicates if the ZNode should be force
                                 initialized if it is already present. Useful
                                 while re-enabling High Availability. (Default:
                                 TRUE)
        @param clear_existing_standby_name_dirs: Indicates if the existing name
                                                 directories for Standby
                                                 NameNode should be cleared
                                                 during the workflow.
                                                 Useful while re-enabling High
                                                 Availability. (Default: TRUE)
        @param clear_existing_jn_edits_dir: Indicates if the existing edits
                                            directories for the JournalNodes
                                            for the specified nameservice
                                            should be cleared during the
                                            workflow. Useful while re-enabling
                                            High Availability. (Default: TRUE)
        @return: Reference to the submitted command.
        @since: API v6
        """
        # Argument names follow the JSON schema of the hdfsEnableNnHa command.
        args = dict(
            activeNnName=active_name,
            standbyNnName=standby_name,
            standbyNnHostId=standby_host_id,
            standbyNameDirList=standby_name_dir_list,
            nameservice=nameservice,
            qjName=qj_name,
            activeFcName=active_fc_name,
            standbyFcName=standby_fc_name,
            zkServiceName=zk_service_name,
            forceInitZNode=force_init_znode,
            clearExistingStandbyNameDirs=clear_existing_standby_name_dirs,
            clearExistingJnEditsDir=clear_existing_jn_edits_dir,
            jns=jns
        )
        return self._cmd('hdfsEnableNnHa', data=args, api_version=6)
    def enable_rm_ha(self, new_rm_host_id, zk_service_name=None):
        """Enable high availability for a YARN ResourceManager.
        @param new_rm_host_id: id of the host where the second ResourceManager
                               will be added.
        @param zk_service_name: Name of the ZooKeeper service to use for auto-
                                failover. If YARN service depends on a
                                ZooKeeper service then that ZooKeeper service
                                will be used for auto-failover and in that case
                                this parameter can be omitted.
        @return: Reference to the submitted command.
        @since: API v6
        """
        args = dict(
            newRmHostId=new_rm_host_id,
            zkServiceName=zk_service_name
        )
        return self._cmd('enableRmHa', data=args)
class ApiServiceSetupInfo(ApiService):
    """Setup payload for creating a service: bundles the service name and
    type with its configuration and role assignments.
    """
    _ATTRIBUTES = {
        'name': None,
        'type': None,
        'config': types.Attr(types.ApiConfig),
        'roles': types.Attr(roles.ApiRole),
    }
    def __init__(self, name=None, type=None,
                 config=None, roles=None):
        # The BaseApiObject expects a resource_root, which we don't care about
        resource_root = None
        # Unfortunately, the json key is called "type". So our input arg
        # needs to be called "type" as well, despite it being a python keyword.
        types.BaseApiObject.init(self, None, locals())
    def set_config(self, config):
        """Set the service configuration
        :param config: A dictionary of config key/value
        """
        if self.config is None:
            self.config = {}
        self.config.update(types.config_to_api_list(config))
    def add_role_info(self, role_name, role_type, host_id, config=None):
        """Add a role info
        The role will be created along with the service setup.
        :param role_name: Role name
        :param role_type: Role type
        :param host_id: The host where the role should run
        :param config: (Optional) A dictionary of role config values
        """
        if self.roles is None:
            self.roles = []
        # Conditional expression instead of the fragile `and/or` idiom:
        # the old form returned None whenever config_to_api_list() produced
        # a falsy value (e.g. for an empty config dict), not only when
        # config itself was None.
        api_config_list = (types.config_to_api_list(config)
                           if config is not None else None)
        self.roles.append({
            'name': role_name,
            'type': role_type,
            'hostRef': {'hostId': host_id},
            'config': api_config_list})
| |
#!/usr/bin/env python
#
## Differential Evolution Solver Class
## Based on algorithms developed by Dr. Rainer Storn & Kenneth Price
## Original C++ code written by: Lester E. Godwin
## PushCorp, Inc.
## Dallas, Texas
## 972-840-0208 x102
## godwin@pushcorp.com
## Created: 6/8/98
## Last Modified: 6/8/98 Revision: 1.0
##
## Solver code ported to Python from C++ July 2002
## by: James R. Phillips
## Birmingham, Alabama USA
## zunzun@zunzun.com
##
## DE Solver modified and cleaned by Patrick Hung, May 2006.
## additional DE Solver (DESolver2) added by Patrick Hung.
##
## bounds (and minimal interface) added by Mike McKerns
## adapted to AbstractSolver interface by Mike McKerns
##
## modified for AbstractMapSolver interface by Mike McKerns
#
# Author: Patrick Hung (patrickh @caltech)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2006-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Solvers
=======
This module contains a collection of optimization routines based on
Storn and Price's differential evolution algorithm. The core solver
algorithm was adapted from Phillips's DETest.py. An alternate solver
is provided that follows the logic in Price, Storn, and Lampen -- in that
both a current generation and a trial generation are maintained, and all
vectors for creating difference vectors and mutations draw from the
current generation... which remains invariant until the end of the
iteration.
A minimal interface that mimics a scipy.optimize interface has also been
implemented, and functionality from the mystic solver API has been added
with reasonable defaults.
Minimal function interface to optimization routines::
diffev -- Differential Evolution (DE) solver
diffev2 -- Price & Storn's Differential Evolution solver
The corresponding solvers built on mystic's AbstractSolver are::
DifferentialEvolutionSolver -- a DE solver
DifferentialEvolutionSolver2 -- Storn & Price's DE solver
Mystic solver behavior activated in diffev and diffev2::
- EvaluationMonitor = Monitor()
- StepMonitor = Monitor()
- strategy = Best1Bin
- termination = ChangeOverGeneration(ftol,gtol), if gtol provided
'' = VTRChangeOverGenerations(ftol), otherwise
Storn & Price's DE Solver has also been implemented to use the "map"
interface. Mystic enables the user to override the standard python
map function with their own 'map' function, or one of the map functions
provided by the pathos package (see http://dev.danse.us/trac/pathos)
for distributed and high-performance computing.
Usage
=====
Practical advice for how to configure the Differential Evolution
Solver for your own objective function can be found on R. Storn's
web page (http://www.icsi.berkeley.edu/~storn/code.html), and is
reproduced here::
First try the following classical settings for the solver configuration:
Choose a crossover strategy (e.g. Rand1Bin), set the number of parents
NP to 10 times the number of parameters, select ScalingFactor=0.8, and
CrossProbability=0.9.
It has been found recently that selecting ScalingFactor from the interval
[0.5, 1.0] randomly for each generation or for each difference vector,
a technique called dither, improves convergence behaviour significantly,
especially for noisy objective functions.
It has also been found that setting CrossProbability to a low value,
e.g. CrossProbability=0.2 helps optimizing separable functions since
it fosters the search along the coordinate axes. On the contrary,
this choice is not effective if parameter dependence is encountered,
    something which is frequently occurring in real-world optimization
problems rather than artificial test functions. So for parameter
dependence the choice of CrossProbability=0.9 is more appropriate.
    Another interesting empirical finding is that raising NP above, say, 40
does not substantially improve the convergence, independent of the
number of parameters. It is worthwhile to experiment with these suggestions.
Make sure that you initialize your parameter vectors by exploiting
their full numerical range, i.e. if a parameter is allowed to exhibit
values in the range [-100, 100] it's a good idea to pick the initial
values from this range instead of unnecessarily restricting diversity.
Keep in mind that different problems often require different settings
for NP, ScalingFactor and CrossProbability (see Ref 1, 2). If you
experience misconvergence, you typically can increase the value for NP,
but often you only have to adjust ScalingFactor to be a little lower or
higher than 0.8. If you increase NP and simultaneously lower ScalingFactor
a little, convergence is more likely to occur but generally takes longer,
i.e. DE is getting more robust (a convergence speed/robustness tradeoff).
If you still get misconvergence you might want to instead try a different
crossover strategy. The most commonly used are Rand1Bin, Rand1Exp,
Best1Bin, and Best1Exp. The crossover strategy is not so important a
choice, although K. Price claims that binomial (Bin) is never worse than
exponential (Exp).
In case of continued misconvergence, check the choice of objective function.
There might be a better one to describe your problem. Any knowledge that
you have about the problem should be worked into the objective function.
A good objective function can make all the difference.
See `mystic.examples.test_rosenbrock` for an example of using
DifferentialEvolutionSolver. DifferentialEvolutionSolver2 has
the identical interface and usage.
All solvers included in this module provide the standard signal handling.
For more information, see `mystic.mystic.abstract_solver`.
References
==========
[1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
Heuristic for Global Optimization over Continuous Spaces. Journal of Global
Optimization 11: 341-359, 1997.
[2] Price, K., Storn, R., and Lampinen, J. - Differential Evolution,
A Practical Approach to Global Optimization. Springer, 1st Edition, 2005
"""
__all__ = ['DifferentialEvolutionSolver','DifferentialEvolutionSolver2',\
'diffev','diffev2']
from mystic.tools import wrap_function, unpair, isiterable
from mystic.tools import wrap_bounds, wrap_penalty, reduced
from mystic.abstract_solver import AbstractSolver
from mystic.abstract_map_solver import AbstractMapSolver
from numpy import asfarray, ravel
class DifferentialEvolutionSolver(AbstractSolver):
"""
Differential Evolution optimization.
"""
def __init__(self, dim, NP=4):
"""
Takes two initial inputs:
dim -- dimensionality of the problem
NP -- size of the trial solution population. [requires: NP >= 4]
All important class members are inherited from AbstractSolver.
"""
NP = max(NP, dim, 4) #XXX: raise Error if npop <= 4?
AbstractSolver.__init__(self,dim,npop=NP)
self.genealogy = [ [] for j in range(NP)]
self.scale = 0.8
self.probability = 0.9
self.strategy = 'Best1Bin'
ftol = 5e-3
from mystic.termination import VTRChangeOverGeneration
self._termination = VTRChangeOverGeneration(ftol)
### XXX: OBSOLETED by wrap_bounds ###
# def _keepSolutionWithinRangeBoundary(self, base):
# """scale trialSolution to be between base value and range boundary"""
# if not self._useStrictRange:
# return
# min = self._strictMin
# max = self._strictMax
# import random
# for i in range(self.nDim):
# if base[i] < min[i] or base[i] > max[i]:
# self.trialSolution[i] = random.uniform(min[i],max[i])
# elif self.trialSolution[i] < min[i]:
# self.trialSolution[i] = random.uniform(min[i],base[i])
# elif self.trialSolution[i] > max[i]:
# self.trialSolution[i] = random.uniform(base[i],max[i])
# return
def UpdateGenealogyRecords(self, id, newchild):
"""
Override me for more refined behavior. Currently all changes
are logged.
"""
self.genealogy[id].append(newchild)
return
def SetConstraints(self, constraints):
"""apply a constraints function to the optimization
input::
- a constraints function of the form: xk' = constraints(xk),
where xk is the current parameter vector. Ideally, this function
is constructed so the parameter vector it passes to the cost function
will satisfy the desired (i.e. encoded) constraints."""
if not constraints:
self._constraints = lambda x: x
elif not callable(constraints):
raise TypeError, "'%s' is not a callable function" % constraints
else: #XXX: check for format: x' = constraints(x) ?
self._constraints = constraints
return # doesn't use wrap_nested
def _decorate_objective(self, cost, ExtraArgs=None):
"""decorate cost function with bounds, penalties, monitors, etc"""
#print ("@", cost, ExtraArgs, max)
raw = cost
if ExtraArgs is None: ExtraArgs = ()
self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
if self._useStrictRange:
indx = list(self.popEnergy).index(self.bestEnergy)
ngen = self.generations #XXX: no random if generations=0 ?
for i in range(self.nPop):
self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i is indx))
cost = wrap_bounds(cost, self._strictMin, self._strictMax)
cost = wrap_penalty(cost, self._penalty)
if self._reducer:
#cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
cost = reduced(self._reducer, arraylike=True)(cost)
# hold on to the 'wrapped' and 'raw' cost function
self._cost = (cost, raw, ExtraArgs)
self._live = True
return cost
def _Step(self, cost=None, ExtraArgs=None, **kwds):
"""perform a single optimization iteration
Note that ExtraArgs should be a *tuple* of extra arguments"""
# process and activate input settings
settings = self._process_inputs(kwds)
for key in settings:
exec "%s = settings['%s']" % (key,key)
# HACK to enable not explicitly calling _decorate_objective
cost = self._bootstrap_objective(cost, ExtraArgs)
init = False # flag to do 0th iteration 'post-initialization'
if not len(self._stepmon): # do generation = 0
init = True
strategy = None
self.population[0] = asfarray(self.population[0])
# decouple bestSolution from population and bestEnergy from popEnergy
self.bestSolution = self.population[0]
self.bestEnergy = self.popEnergy[0]
for candidate in range(self.nPop):
if not len(self._stepmon):
# generate trialSolution (within valid range)
self.trialSolution[:] = self.population[candidate]
if strategy:
# generate trialSolution (within valid range)
strategy(self, candidate)
# apply constraints
self.trialSolution[:] = self._constraints(self.trialSolution)
# apply penalty
#trialEnergy = self._penalty(self.trialSolution)
# calculate cost
trialEnergy = cost(self.trialSolution)
# trialEnergy should be a scalar
if isiterable(trialEnergy) and len(trialEnergy) == 1:
trialEnergy = trialEnergy[0]
# for len(trialEnergy) > 1, will throw ValueError below
if trialEnergy < self.popEnergy[candidate]:
# New low for this candidate
self.popEnergy[candidate] = trialEnergy
self.population[candidate][:] = self.trialSolution
self.UpdateGenealogyRecords(candidate, self.trialSolution[:])
# Check if all-time low
if trialEnergy < self.bestEnergy:
self.bestEnergy = trialEnergy
self.bestSolution[:] = self.trialSolution
# log bestSolution and bestEnergy (includes penalty)
self._stepmon(self.bestSolution[:], self.bestEnergy, self.id)
# if savefrequency matches, then save state
self._AbstractSolver__save_state()
# do callback
if callback is not None: callback(self.bestSolution)
# initialize termination conditions, if needed
if init: self._termination(self) #XXX: at generation 0 or always?
return #XXX: call Terminated ?
def _process_inputs(self, kwds):
"""process and activate input settings"""
#allow for inputs that don't conform to AbstractSolver interface
#NOTE: not sticky: callback, disp
#NOTE: sticky: EvaluationMonitor, StepMonitor, penalty, constraints
#NOTE: sticky: strategy, CrossProbability, ScalingFactor
settings = super(DifferentialEvolutionSolver, self)._process_inputs(kwds)
from mystic import strategy
strategy = getattr(strategy,self.strategy,strategy.Best1Bin) #XXX: None?
settings.update({\
'strategy': strategy}) #mutation strategy (see mystic.strategy)
probability=self.probability #potential for parameter cross-mutation
scale=self.scale #multiplier for mutation impact
[settings.update({i:j}) for (i,j) in kwds.items() if i in settings]
word = 'CrossProbability'
self.probability = kwds[word] if word in kwds else probability
word = 'ScalingFactor'
self.scale = kwds[word] if word in kwds else scale
self.strategy = getattr(settings['strategy'],'__name__','Best1Bin')
return settings
def Solve(self, cost=None, termination=None, ExtraArgs=None, **kwds):
    """Minimize a function using differential evolution.

    Description:
        Uses a differential evolution algorithm to find the minimum of
        a function of one or more variables.

    Inputs:
        cost -- the Python function or method to be minimized.

    Additional Inputs:
        termination -- callable object providing termination conditions.
        ExtraArgs -- extra arguments for cost.

    Further Inputs:
        strategy -- the mutation strategy for generating new trial
            solutions [default = Best1Bin]
        CrossProbability -- the probability of cross-parameter mutations
            [default = 0.9]
        ScalingFactor -- multiplier for the impact of mutations on the
            trial solution [default = 0.8]
        sigint_callback -- callback function for signal handler.
        callback -- an optional user-supplied function to call after each
            iteration. It is called as callback(xk), where xk is the
            current parameter vector. [default = None]
        disp -- non-zero to print convergence messages.
    """
    # all of the work happens in the abstract base class; this override
    # exists to carry the DE-specific documentation above
    parent = super(DifferentialEvolutionSolver, self)
    parent.Solve(cost, termination, ExtraArgs, **kwds)
    return
class DifferentialEvolutionSolver2(AbstractMapSolver):
    """
    Differential Evolution optimization, using Storn and Price's algorithm.

    Alternate implementation:
      - utilizes a map-reduce interface, extensible to parallel computing
      - both a current and a next generation are kept, while the current
        generation is invariant during the main DE logic
    """
    def __init__(self, dim, NP=4):
        """
        Takes two initial inputs:
            dim  -- dimensionality of the problem
            NP   -- size of the trial solution population. [requires: NP >= 4]

        All important class members are inherited from AbstractSolver.
        """
        # population must be at least as large as the dimensionality, and >= 4
        NP = max(NP, dim, 4) #XXX: raise Error if npop <= 4?
        super(DifferentialEvolutionSolver2, self).__init__(dim, npop=NP)
        # one genealogy (history of accepted trial solutions) per candidate
        self.genealogy = [ [] for j in range(NP)]
        self.scale = 0.8          # multiplier for mutation impact
        self.probability = 0.9    # probability of cross-parameter mutation
        self.strategy = 'Best1Bin' # name of the default mutation strategy
        ftol = 5e-3
        from mystic.termination import VTRChangeOverGeneration
        self._termination = VTRChangeOverGeneration(ftol)

    def UpdateGenealogyRecords(self, id, newchild):
        """
        Override me for more refined behavior. Currently all changes
        are logged.
        """
        self.genealogy[id].append(newchild)
        return

    def SetConstraints(self, constraints):
        """apply a constraints function to the optimization

        input::
            - a constraints function of the form: xk' = constraints(xk),
              where xk is the current parameter vector. Ideally, this function
              is constructed so the parameter vector it passes to the cost function
              will satisfy the desired (i.e. encoded) constraints."""
        if not constraints:
            # no constraints: use the identity function
            self._constraints = lambda x: x
        elif not callable(constraints):
            raise TypeError, "'%s' is not a callable function" % constraints
        else: #XXX: check for format: x' = constraints(x) ?
            self._constraints = constraints
        return # doesn't use wrap_nested

    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate cost function with bounds, penalties, monitors, etc"""
        #print ("@", cost, ExtraArgs, max)
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        from python_map import python_map
        if self._map != python_map:
            #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
            from mystic.monitors import Null
            evalmon = Null()
        else: evalmon = self._evalmon
        # wrap cost so each evaluation is counted and logged
        fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            # clip the initial population into the strict bounds; the current
            # best candidate may be clipped differently on generation 0
            indx = list(self.popEnergy).index(self.bestEnergy)
            ngen = self.generations #XXX: no random if generations=0 ?
            for i in range(self.nPop):
                self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i is indx))
            cost = wrap_bounds(cost, self._strictMin, self._strictMax)
        cost = wrap_penalty(cost, self._penalty)
        if self._reducer:
           #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost

    def _Step(self, cost=None, ExtraArgs=None, **kwds):
        """perform a single optimization iteration
        Note that ExtraArgs should be a *tuple* of extra arguments"""
        # process and activate input settings
        settings = self._process_inputs(kwds)
        # bind each setting (e.g. callback, disp, strategy) as a local variable
        for key in settings:
            exec "%s = settings['%s']" % (key,key)
        # HACK to enable not explicitly calling _decorate_objective
        cost = self._bootstrap_objective(cost, ExtraArgs)
        init = False # flag to do 0th iteration 'post-initialization'
        if not len(self._stepmon): # do generation = 0
            init = True
            strategy = None
            self.population[0] = asfarray(self.population[0])
            # decouple bestSolution from population and bestEnergy from popEnergy
            self.bestSolution = self.population[0]
            self.bestEnergy = self.popEnergy[0]
        # build the entire trial generation first (the current generation is
        # held invariant here), then evaluate all trials through the map
        for candidate in range(self.nPop):
            if not len(self._stepmon):
                # generate trialSolution (within valid range)
                self.trialSolution[candidate][:] = self.population[candidate]
            if strategy:
                # generate trialSolution (within valid range)
                strategy(self, candidate)
            # apply constraints
            self.trialSolution[candidate][:] = self._constraints(self.trialSolution[candidate])
        # bind constraints to cost #XXX: apparently imposes constraints poorly
        #concost = wrap_nested(cost, self._constraints)
        # apply penalty
        #trialEnergy = map(self._penalty, self.trialSolution)#,**self._mapconfig)
        # calculate cost
        trialEnergy = self._map(cost, self.trialSolution, **self._mapconfig)
        self._fcalls[0] += len(self.trialSolution) #FIXME: manually increment
        # each trialEnergy should be a scalar
        if isiterable(trialEnergy[0]) and len(trialEnergy[0]) == 1:
            trialEnergy = ravel(trialEnergy)
            # for len(trialEnergy) > 1, will throw ValueError below
        # accept any trial that improves on its candidate; track global best
        for candidate in range(self.nPop):
            if trialEnergy[candidate] < self.popEnergy[candidate]:
                # New low for this candidate
                self.popEnergy[candidate] = trialEnergy[candidate]
                self.population[candidate][:] = self.trialSolution[candidate]
                self.UpdateGenealogyRecords(candidate, self.trialSolution[candidate][:])
                # Check if all-time low
                if trialEnergy[candidate] < self.bestEnergy:
                    self.bestEnergy = trialEnergy[candidate]
                    self.bestSolution[:] = self.trialSolution[candidate]
        # log bestSolution and bestEnergy (includes penalty)
        #FIXME: StepMonitor works for 'pp'?
        self._stepmon(self.bestSolution[:], self.bestEnergy, self.id)
        # if savefrequency matches, then save state
        self._AbstractSolver__save_state()
        # do callback
        if callback is not None: callback(self.bestSolution)
        # initialize termination conditions, if needed
        if init: self._termination(self) #XXX: at generation 0 or always?
        return #XXX: call Terminated ?

    def _process_inputs(self, kwds):
        """process and activate input settings"""
        #allow for inputs that don't conform to AbstractSolver interface
        #NOTE: not sticky: callback, disp
        #NOTE: sticky: EvaluationMonitor, StepMonitor, penalty, constraints
        #NOTE: sticky: strategy, CrossProbability, ScalingFactor
        settings = super(DifferentialEvolutionSolver2, self)._process_inputs(kwds)
        from mystic import strategy
        # resolve the stored strategy name to the actual strategy function
        strategy = getattr(strategy,self.strategy,strategy.Best1Bin) #XXX: None?
        settings.update({\
            'strategy': strategy}) #mutation strategy (see mystic.strategy)
        probability=self.probability #potential for parameter cross-mutation
        scale=self.scale #multiplier for mutation impact
        # any keyword naming a known setting overrides the stored value
        [settings.update({i:j}) for (i,j) in kwds.items() if i in settings]
        word = 'CrossProbability'
        self.probability = kwds[word] if word in kwds else probability
        word = 'ScalingFactor'
        self.scale = kwds[word] if word in kwds else scale
        # remember the chosen strategy by name for the next call
        self.strategy = getattr(settings['strategy'],'__name__','Best1Bin')
        return settings

    def Solve(self, cost=None, termination=None, ExtraArgs=None, **kwds):
        """Minimize a function using differential evolution.

        Description:
            Uses a differential evolution algorithm to find the minimum of
            a function of one or more variables. This implementation holds
            the current generation invariant until the end of each iteration.

        Inputs:
            cost -- the Python function or method to be minimized.

        Additional Inputs:
            termination -- callable object providing termination conditions.
            ExtraArgs -- extra arguments for cost.

        Further Inputs:
            strategy -- the mutation strategy for generating new trial
                solutions [default = Best1Bin]
            CrossProbability -- the probability of cross-parameter mutations
                [default = 0.9]
            ScalingFactor -- multiplier for the impact of mutations on the
                trial solution [default = 0.8]
            sigint_callback -- callback function for signal handler.
            callback -- an optional user-supplied function to call after each
                iteration. It is called as callback(xk), where xk is
                the current parameter vector. [default = None]
            disp -- non-zero to print convergence messages.
        """
        super(DifferentialEvolutionSolver2, self).Solve(cost, termination,\
                                                        ExtraArgs, **kwds)
        return
def diffev2(cost,x0,npop=4,args=(),bounds=None,ftol=5e-3,gtol=None,
            maxiter=None,maxfun=None,cross=0.9,scale=0.8,
            full_output=0,disp=1,retall=0,callback=None,**kwds):
    """Minimize a function using Storn & Price's differential evolution.

    Description:
        Uses Storn & Prices's differential evolution algorithm to find the
        minimum of a function of one or more variables. Mimics a
        scipy.optimize style interface.

    Inputs:
        cost -- the Python function or method to be minimized.
        x0 -- the initial guess (ndarray), if desired to start from a
            set point; otherwise takes an array of (min,max) bounds,
            for when random initial points are desired
        npop -- size of the trial solution population.

    Additional Inputs:
        args -- extra arguments for cost.
        bounds -- list - n pairs of bounds (min,max), one pair for each parameter.
        ftol -- number - acceptable relative error in cost(xopt) for convergence.
        gtol -- number - maximum number of iterations to run without improvement.
        maxiter -- number - the maximum number of iterations to perform.
        maxfun -- number - the maximum number of function evaluations.
        cross -- number - the probability of cross-parameter mutations
        scale -- number - multiplier for impact of mutations on trial solution.
        full_output -- number - non-zero if fval and warnflag outputs are desired.
        disp -- number - non-zero to print convergence messages.
        retall -- number - non-zero to return list of solutions at each iteration.
        callback -- an optional user-supplied function to call after each
            iteration. It is called as callback(xk), where xk is the
            current parameter vector.
        handler -- boolean - enable/disable handling of interrupt signal.
        strategy -- strategy - override the default mutation strategy.
        itermon -- monitor - override the default GenerationMonitor.
        evalmon -- monitor - override the default EvaluationMonitor.
        constraints -- an optional user-supplied function. It is called as
            constraints(xk), where xk is the current parameter vector.
            This function must return xk', a parameter vector that satisfies
            the encoded constraints.
        penalty -- an optional user-supplied function. It is called as
            penalty(xk), where xk is the current parameter vector.
            This function should return y', with y' == 0 when the encoded
            constraints are satisfied, and y' > 0 otherwise.

    Returns: (xopt, {fopt, iter, funcalls, warnflag}, {allvecs})
        xopt -- ndarray - minimizer of function
        fopt -- number - value of function at minimum: fopt = cost(xopt)
        iter -- number - number of iterations
        funcalls -- number - number of function calls
        warnflag -- number - Integer warning flag:
            1 : 'Maximum number of function evaluations.'
            2 : 'Maximum number of iterations.'
        allvecs -- list - a list of solutions at each iteration
    """
    # default to the "invariant current generation" (map/reduce) solver,
    # unless the caller has explicitly selected otherwise
    kwds.setdefault('invariant_current', True)
    return diffev(cost,x0,npop,args=args,bounds=bounds,ftol=ftol,gtol=gtol,
                  maxiter=maxiter,maxfun=maxfun,cross=cross,scale=scale,
                  full_output=full_output,disp=disp,retall=retall,
                  callback=callback,**kwds)
def diffev(cost,x0,npop=4,args=(),bounds=None,ftol=5e-3,gtol=None,
           maxiter=None,maxfun=None,cross=0.9,scale=0.8,
           full_output=0,disp=1,retall=0,callback=None,**kwds):
    """Minimize a function using differential evolution.

    Description:
        Uses a differential evolution algorithm to find the minimum of
        a function of one or more variables. Mimics a scipy.optimize style
        interface.

    Inputs:
        cost -- the Python function or method to be minimized.
        x0 -- the initial guess (ndarray), if desired to start from a
            set point; otherwise takes an array of (min,max) bounds,
            for when random initial points are desired
        npop -- size of the trial solution population.

    Additional Inputs:
        args -- extra arguments for cost.
        bounds -- list - n pairs of bounds (min,max), one pair for each parameter.
        ftol -- number - acceptable relative error in cost(xopt) for convergence.
        gtol -- number - maximum number of iterations to run without improvement.
        maxiter -- number - the maximum number of iterations to perform.
        maxfun -- number - the maximum number of function evaluations.
        cross -- number - the probability of cross-parameter mutations
        scale -- number - multiplier for impact of mutations on trial solution.
        full_output -- number - non-zero if fval and warnflag outputs are desired.
        disp -- number - non-zero to print convergence messages.
        retall -- number - non-zero to return list of solutions at each iteration.
        callback -- an optional user-supplied function to call after each
            iteration. It is called as callback(xk), where xk is the
            current parameter vector.
        handler -- boolean - enable/disable handling of interrupt signal.
        strategy -- strategy - override the default mutation strategy.
        itermon -- monitor - override the default GenerationMonitor.
        evalmon -- monitor - override the default EvaluationMonitor.
        constraints -- an optional user-supplied function. It is called as
            constraints(xk), where xk is the current parameter vector.
            This function must return xk', a parameter vector that satisfies
            the encoded constraints.
        penalty -- an optional user-supplied function. It is called as
            penalty(xk), where xk is the current parameter vector.
            This function should return y', with y' == 0 when the encoded
            constraints are satisfied, and y' > 0 otherwise.

    Returns: (xopt, {fopt, iter, funcalls, warnflag}, {allvecs})
        xopt -- ndarray - minimizer of function
        fopt -- number - value of function at minimum: fopt = cost(xopt)
        iter -- number - number of iterations
        funcalls -- number - number of function calls
        warnflag -- number - Integer warning flag:
            1 : 'Maximum number of function evaluations.'
            2 : 'Maximum number of iterations.'
        allvecs -- list - a list of solutions at each iteration
    """
    # pull keyword-only options out of kwds (defaults: serial solver, no handler)
    invariant_current = kwds['invariant_current'] if 'invariant_current' in kwds else False
    handler = kwds['handler'] if 'handler' in kwds else False
    from mystic.strategy import Best1Bin
    strategy = kwds['strategy'] if 'strategy' in kwds else Best1Bin
    from mystic.monitors import Monitor
    stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor()
    evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor()

    if gtol: #if number of generations provided, use ChangeOverGeneration
        from mystic.termination import ChangeOverGeneration
        termination = ChangeOverGeneration(ftol,gtol)
    else:
        from mystic.termination import VTRChangeOverGeneration
        termination = VTRChangeOverGeneration(ftol)

    ND = len(x0)
    if invariant_current: #use Solver2, not Solver1
        solver = DifferentialEvolutionSolver2(ND,npop)
    else:
        solver = DifferentialEvolutionSolver(ND,npop)
    solver.SetEvaluationLimits(maxiter,maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    if 'penalty' in kwds:
        solver.SetPenalty(kwds['penalty'])
    if 'constraints' in kwds:
        solver.SetConstraints(kwds['constraints'])
    # NOTE: strict ranges must be set before the initial points so the
    # population can be clipped into bounds at startup
    if bounds is not None:
        minb,maxb = unpair(bounds)
        solver.SetStrictRanges(minb,maxb)

    try: #x0 passed as 1D array of (min,max) pairs
        minb,maxb = unpair(x0)
        solver.SetRandomInitialPoints(minb,maxb)
    except: #x0 passed as 1D array of initial parameter values
        solver.SetInitialPoints(x0)

    if handler: solver.enable_signal_handler()
    #TODO: allow sigint_callbacks for all minimal interfaces ?
    solver.Solve(cost, termination=termination, strategy=strategy, \
                #sigint_callback=other_callback,\
                 CrossProbability=cross, ScalingFactor=scale, \
                 ExtraArgs=args, callback=callback)
    solution = solver.Solution()

    # code below here pushes output to scipy.optimize.fmin interface
    #x = list(solver.bestSolution)
    x = solver.bestSolution
    fval = solver.bestEnergy
    warnflag = 0
    fcalls = solver.evaluations
    iterations = solver.generations
    allvecs = stepmon.x

    if fcalls >= solver._maxfun:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of function evaluations has "\
                  "been exceeded."
    elif iterations >= solver._maxiter:
        warnflag = 2
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
    else:
        if disp:
            print "Optimization terminated successfully."
            print " Current function value: %f" % fval
            print " Iterations: %d" % iterations
            print " Function evaluations: %d" % fcalls

    if full_output:
        retlist = x, fval, iterations, fcalls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)

    return retlist
# when run as a script, print the module's documentation
if __name__=='__main__':
    help(__name__)
# end of file
| |
# Copyright (C) 2018, HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import time
from typing import Any
import htcondor
from toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem
logger = logging.getLogger(__name__)
class HTCondorBatchSystem(AbstractGridEngineBatchSystem):
    # When using HTCondor, the Schedd handles scheduling

    class Worker(AbstractGridEngineBatchSystem.Worker):

        # Override the createJobs method so that we can use htcondor.Submit objects
        # and so that we can get disk allocation requests and ceil the CPU request.
        def createJobs(self, newJob: Any) -> bool:
            """Submit as many waiting jobs as the local-jobs limit allows.

            Returns True if any job was submitted (i.e. some work was done).
            """
            activity = False

            if newJob is not None:
                self.waitingJobs.append(newJob)

            # Queue jobs as necessary:
            while len(self.waitingJobs) > 0 and len(self.runningJobs) < int(self.boss.config.maxLocalJobs):
                activity = True
                jobID, cpu, memory, disk, jobName, command = self.waitingJobs.pop(0)

                # Prepare the htcondor.Submit object
                submitObj: htcondor.Submit = self.prepareHTSubmission(cpu, memory, disk, jobID, jobName, command)
                logger.debug("Submitting %r", submitObj)

                # Submit job and get batch system ID (i.e. the ClusterId)
                batchJobID = self.submitJob(submitObj)
                logger.debug("Submitted job %s", str(batchJobID))

                # Store dict for mapping Toil job ID to batch job ID
                # TODO: Note that this currently stores a tuple of (batch system
                # ID, Task), but the second value is None by default and doesn't
                # seem to be used
                self.batchJobIDs[jobID] = (batchJobID, None)

                # Add to queue of queued ("running") jobs
                self.runningJobs.add(jobID)

            return activity

        def prepareHTSubmission(self, cpu: int, memory: int, disk: int, jobID: int, jobName: str, command: str) -> htcondor.Submit:
            """Build the htcondor.Submit description for one Toil job."""

            # Convert resource requests
            cpu = int(math.ceil(cpu)) # integer CPUs
            ht_memory = float(memory)/1024 # memory in KB
            ht_disk = float(disk)/1024 # disk in KB
            # NOTE(review): /1024 gives binary KiB; HTCondor's 'KB' suffix is
            # also 1024-based, so the units agree — confirm inputs are bytes.

            # NOTE: formatStdOutErrPath() puts files in the Toil workflow directory, which defaults
            # to being in the system temporary directory ($TMPDIR, /tmp) which is unlikely to be on
            # a shared filesystem. So to make this work we need to set should_transfer_files = Yes
            # in the submit file, so that HTCondor will write the standard output/error files on the
            # compute node, then transfer back once the job has completed.
            stdoutfile: str = self.boss.formatStdOutErrPath(jobID, '$(cluster)', 'out')
            stderrfile: str = self.boss.formatStdOutErrPath(jobID, '$(cluster)', 'err')
            condorlogfile: str = self.boss.formatStdOutErrPath(jobID, '$(cluster)', 'events')

            # Execute the entire command as /bin/sh -c "command"
            # TODO: Transfer the jobStore directory if using a local file store with a relative path.
            submit_parameters = {
                'executable': '/bin/sh',
                'transfer_executable': 'False',
                'arguments': '''"-c '{0}'"'''.format(command).encode('utf-8'), # Workaround for HTCondor Python bindings Unicode conversion bug
                'environment': self.getEnvString(),
                'getenv': 'True',
                'should_transfer_files': 'Yes', # See note above for stdoutfile, stderrfile
                'output': stdoutfile,
                'error': stderrfile,
                'log': condorlogfile,
                'request_cpus': '{0}'.format(cpu),
                'request_memory': '{0:.3f}KB'.format(ht_memory),
                'request_disk': '{0:.3f}KB'.format(ht_disk),
                # keep completed jobs queued so getJobExitCode can read the exit status
                'leave_in_queue': '(JobStatus == 4)',
                '+IsToilJob': 'True',
                '+ToilJobID': '{0}'.format(jobID),
                '+ToilJobName': '"{0}"'.format(jobName),
                '+ToilJobKilled': 'False',
            }

            # Extra parameters for HTCondor
            extra_parameters = os.getenv('TOIL_HTCONDOR_PARAMS')
            if extra_parameters is not None:
                logger.debug("Extra HTCondor parameters added to submit file from TOIL_HTCONDOR_PARAMS env. variable: {}".format(extra_parameters))
                # format is 'key1=value1;key2=value2;...'
                for parameter, value in [parameter_value.split('=', 1) for parameter_value in extra_parameters.split(';')]:
                    parameter = parameter.strip()
                    value = value.strip()
                    if parameter in submit_parameters:
                        raise ValueError("Some extra parameters are incompatible: {}".format(extra_parameters))

                    submit_parameters[parameter] = value

            # Return the Submit object
            return htcondor.Submit(submit_parameters)

        def submitJob(self, submitObj):
            """Queue one prepared Submit object; return its ClusterId."""

            # Queue the job using a Schedd transaction
            schedd = self.connectSchedd()
            with schedd.transaction() as txn:
                batchJobID = submitObj.queue(txn)

            # Return the ClusterId
            return batchJobID

        def getRunningJobIDs(self):
            """Return {toil job ID: runtime seconds} for running Toil jobs."""

            # Get all Toil jobs that are running
            requirements = '(JobStatus == 2) && (IsToilJob)'
            projection = ['ClusterId', 'ToilJobID', 'EnteredCurrentStatus']

            schedd = self.connectSchedd()
            ads = schedd.xquery(requirements = requirements,
                                projection = projection)

            # Only consider the Toil jobs that are part of this workflow
            batchJobIDs = [batchJobID for (batchJobID, task) in self.batchJobIDs.values()]
            job_runtimes = {}
            for ad in ads:
                batchJobID = int(ad['ClusterId'])
                jobID = int(ad['ToilJobID'])
                if not (batchJobID in batchJobIDs):
                    continue

                # HTCondor stores the start of the runtime as a Unix timestamp
                runtime = time.time() - ad['EnteredCurrentStatus']
                job_runtimes[jobID] = runtime

            return job_runtimes

        def killJob(self, jobID):
            """Mark a job as killed; removal happens in getJobExitCode."""
            batchJobID = self.batchJobIDs[jobID][0]
            logger.debug("Killing HTCondor job {0}".format(batchJobID))

            # Set the job to be killed when its exit status is checked
            schedd = self.connectSchedd()
            job_spec = '(ClusterId == {0})'.format(batchJobID)
            schedd.edit(job_spec, 'ToilJobKilled', 'True')

        def getJobExitCode(self, batchJobID):
            """Return the job's exit code, or None if it has not finished.

            Completed, held, and killed jobs are removed from the Schedd here.
            """
            logger.debug("Getting exit code for HTCondor job {0}".format(batchJobID))

            # human-readable names for the HTCondor JobStatus integers
            status = {
                1: 'Idle',
                2: 'Running',
                3: 'Removed',
                4: 'Completed',
                5: 'Held',
                6: 'Transferring Output',
                7: 'Suspended'
            }

            requirements = '(ClusterId == {0})'.format(batchJobID)
            projection = ['JobStatus', 'ToilJobKilled', 'ExitCode',
                          'HoldReason', 'HoldReasonSubCode']

            schedd = self.connectSchedd()
            ads = schedd.xquery(requirements = requirements, projection = projection)

            # Make sure a ClassAd was returned
            try:
                try:
                    ad = next(ads)
                except TypeError:
                    # NOTE(review): fallback for query iterators that only
                    # expose a .next() method — confirm against the bindings
                    ad = ads.next()
            except StopIteration:
                logger.error(
                    "No HTCondor ads returned using constraint: {0}".format(requirements))
                raise

            # Make sure only one ClassAd was returned
            try:
                try:
                    next(ads)
                except TypeError:
                    ads.next()
            except StopIteration:
                pass
            else:
                logger.warning(
                    "Multiple HTCondor ads returned using constraint: {0}".format(requirements))

            if ad['ToilJobKilled']:
                logger.debug("HTCondor job {0} was killed by Toil".format(batchJobID))

                # Remove the job from the Schedd and return 1
                job_spec = 'ClusterId == {0}'.format(batchJobID)
                schedd.act(htcondor.JobAction.Remove, job_spec)
                return 1

            elif status[ad['JobStatus']] == 'Completed':
                logger.debug("HTCondor job {0} completed with exit code {1}".format(
                    batchJobID, ad['ExitCode']))

                # Remove the job from the Schedd and return its exit code
                job_spec = 'ClusterId == {0}'.format(batchJobID)
                schedd.act(htcondor.JobAction.Remove, job_spec)
                return int(ad['ExitCode'])

            elif status[ad['JobStatus']] == 'Held':
                logger.error("HTCondor job {0} was held: '{1} (sub code {2})'".format(
                    batchJobID, ad['HoldReason'], ad['HoldReasonSubCode']))

                # Remove the job from the Schedd and return 1
                job_spec = 'ClusterId == {0}'.format(batchJobID)
                schedd.act(htcondor.JobAction.Remove, job_spec)
                return 1

            else: # Job still running or idle or doing something else
                logger.debug("HTCondor job {0} has not completed (Status: {1})".format(
                    batchJobID, status[ad['JobStatus']]))
                return None

        """
        Implementation-specific helper methods
        """

        def connectSchedd(self):
            '''Connect to HTCondor Schedd and return a Schedd object'''

            condor_host = os.getenv('TOIL_HTCONDOR_COLLECTOR')
            schedd_name = os.getenv('TOIL_HTCONDOR_SCHEDD')

            # If TOIL_HTCONDOR_ variables are set, use them to find the Schedd
            if condor_host and schedd_name:
                logger.debug(
                    "Connecting to HTCondor Schedd {0} using Collector at {1}".format(
                        schedd_name, condor_host))
                try:
                    schedd_ad = htcondor.Collector(condor_host).locate(
                        htcondor.DaemonTypes.Schedd, schedd_name)
                except IOError:
                    logger.error(
                        "Could not connect to HTCondor Collector at {0}".format(condor_host))
                    raise
                except ValueError:
                    logger.error(
                        "Could not find HTCondor Schedd with name {0}".format(schedd_name))
                    raise
                else:
                    schedd = htcondor.Schedd(schedd_ad)

            # Otherwise assume the Schedd is on the local machine
            else:
                logger.debug("Connecting to HTCondor Schedd on local machine")
                schedd = htcondor.Schedd()

            # Ping the Schedd to make sure it's there and responding
            try:
                schedd.xquery(limit = 0)
            except RuntimeError:
                logger.error("Could not connect to HTCondor Schedd")
                raise

            return schedd

        def getEnvString(self):
            '''Build an environment string that a HTCondor Submit object can use.

            For examples of valid strings, see:
            http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html#man-condor-submit-environment
            '''

            env_items = []
            if self.boss.environment:
                for key, value in self.boss.environment.items():

                    # Each variable should be in the form of <key>='<value>'
                    env_string = key + "="

                    # The entire value should be encapsulated in single quotes
                    # Quote marks (single or double) that are part of the value should be duplicated
                    env_string += "'" + value.replace("'", "''").replace('"', '""') + "'"

                    env_items.append(env_string)

            # The entire string should be encapsulated in double quotes
            # Each variable should be separated by a single space
            return '"' + ' '.join(env_items) + '"'

    # Override the issueBatchJob method so HTCondor can be given the disk request
    def issueBatchJob(self, jobNode):
        # Avoid submitting internal jobs to the batch queue, handle locally
        localID = self.handleLocalJob(jobNode)
        if localID:
            return localID
        else:
            self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk)
            jobID = self.getNextJobID()
            self.currentJobs.add(jobID)

            # Add the jobNode.disk and jobNode.jobName to the job tuple
            self.newJobsQueue.put((jobID, jobNode.cores, jobNode.memory, jobNode.disk, jobNode.jobName, jobNode.command))
            logger.debug("Issued the job command: %s with job id: %s ", jobNode.command, str(jobID))
        return jobID
| |
#! /usr/bin/env python
from __future__ import print_function
import os
import re
import argparse
import logging
from collections import defaultdict
buffer_size = 1000 # How many IGTs to cache before writing to the file
default_split_key = '_ungrouped_'  # output group used when the split key is absent

### READING ODIN TEXT ##################################################

# Matches an ODIN block header, e.g.:
#   doc_id=123.txt igt_id=igt123-1 5 9 L G T
# igt_id is optional; linerange is "start end"; linetypes is the tag list.
doc_re = re.compile(r'doc_id=(?P<doc_id>\S+) '
                    r'(?:igt_id=(?P<igt_id>\S+) )?'
                    r'(?P<linerange>\d+ \d+) '
                    r'(?P<linetypes>.*)')
def odin_blocks(lines):
    """Yield one dict per ODIN block found in the iterable of raw lines.

    Each yielded dict contains the header fields (doc/igt ids, line range,
    line types), the detected language name and ISO-639-3 code, the parsed
    'line=' records, and the raw header lines.
    """
    line_iterator = iter(lines)
    for line in line_iterator:
        doc = doc_re.match(line)
        if doc is None:
            # not a block header; warn if it looks like one we failed to parse
            if 'doc_id=' in line:
                logging.warning('Possible ODIN instance missed: {}'
                                .format(line))
            continue
        header_lines = []
        lang = None
        iso639 = None
        odin_lines = []
        try:
            # consume header (metadata) lines up to the first 'line=' row
            while line.strip() != '' and not line.startswith('line='):
                header_lines.append(line.rstrip())
                line = next(line_iterator)
            lang, iso639 = get_best_lang_match(header_lines)
            log_comments(
                doc.group('doc_id'), doc.group('linerange'),
                header_lines
            )
            if lang is None or iso639 is None:
                logging.warning('Failed to get language or language code for '
                                'document {}, lines {}.'
                                .format(doc.group('doc_id'),
                                        doc.group('linerange')))
            else:
                logging.debug('Document {}, lines {}, Language: {}, '
                              'ISO-639-3: {}'
                              .format(doc.group('doc_id'),
                                      doc.group('linerange'),
                                      lang, iso639))
            # consume the IGT content lines until a blank line ends the block
            while line.strip() != '':
                odin_lines.append(odin_line(line))
                line = next(line_iterator)
        except StopIteration:
            # input ended mid-block; fall through and yield what we collected
            pass
        finally:
            # yield in 'finally' so a block cut off by end-of-input is
            # still emitted
            yield {
                'doc_id': doc.group('doc_id'),
                'igt_id': doc.group('igt_id'),
                'line_range': doc.group('linerange'),
                'line_types': doc.group('linetypes'),
                'language': lang,
                'iso-639-3': iso639,
                'lines': odin_lines,
                'header_lines': header_lines
            }
# matches 'Name (code)' at the end of a language field, e.g. 'French (fra)'
lang_chosen_re = re.compile(r'(?P<name>.*) \((?P<iso639>[^)]+)\)\s*$',
                            re.UNICODE)
# matches 'stage2_LN_lang_code: Name (..., code)'
stage2_LN_re = re.compile(r'stage2_LN_lang_code: (?P<name>.*) '
                          r'\([^,]+, (?P<iso639>[^)]+)\)')
chosen_idx_re = re.compile(r'lang_chosen_idx=(?P<idx>[-0-9]+)')

def get_best_lang_match(lines):
    """Return a (language name, iso-639-3 code) pair for an IGT header.

    The header fields are tried from most to least reliable:
    'language' / 'stage3_lang_chosen' / 'stage2_lang_chosen', then
    'stage2_LN_lang_code', then 'lang_code' indexed by the note's
    lang_chosen_idx. If nothing matches, ('(Undetermined)', 'und') is
    returned.
    """
    lang_lines = dict(l.split(':', 1) for l in lines if ':' in l)
    # find best match
    match = None
    for key in ('language', 'stage3_lang_chosen', 'stage2_lang_chosen'):
        if key in lang_lines:
            match = lang_chosen_re.search(lang_lines[key])
            if match:
                break
    if match is None:
        if 'stage2_LN_lang_code' in lang_lines:
            first = lang_lines['stage2_LN_lang_code'].split('||', 1)[0]
            match = stage2_LN_re.match(first)
        elif 'lang_code' in lang_lines and \
                'lang_chosen_idx' in lang_lines.get('note', ''):
            # BUG FIX: previously indexed lang_lines['note'] directly, which
            # raised KeyError whenever 'lang_code' was present but the header
            # had no 'note' line; .get() makes the probe safe (and the body
            # below only runs when 'note' really exists).
            prematch = chosen_idx_re.search(lang_lines['note'])
            if prematch:
                idx = int(prematch.group('idx'))
                if idx != -1:
                    # idx selects one alternative from the '||'-separated list
                    langstring = lang_lines['lang_code'].split('||')[idx]
                    match = lang_chosen_re.match(langstring)
    if match:
        return (match.group('name').strip().title(),
                match.group('iso639').strip().lower())
    else:
        return ('(Undetermined)', 'und')
def log_comments(doc_id, linerange, lines):
    """Log (at INFO) any annotator comments found among the header lines."""
    comment_keys = ('comments', 'stage2_comment', 'not_an_IGT')
    comments = []
    for raw in lines:
        if ':' not in raw:
            continue
        # a comment line is one whose field name is a known comment key
        if raw.split(':', 1)[0] in comment_keys:
            comments.append(raw)
    if comments:
        logging.info(
            'doc_id={} lines={} has annotator comments:\n'
            .format(doc_id, linerange) +
            ' ' + '\n '.join(comments)
        )
# matches one IGT content row: 'line=N tag=T:content'
line_re = re.compile(r'line=(?P<line>\d+) tag=(?P<tag>[^:]+):(?P<content>.*)')

def odin_line(line):
    """Parse a single ODIN 'line=N tag=T:content' row into a dict.

    Returns an empty dict (and logs a warning) when the row cannot
    be parsed.
    """
    parsed = line_re.match(line)
    if parsed is None:
        logging.warning('Non-empty IGT line could not be parsed:\n{}'
                        .format(line))
        return {}
    return {
        'line': parsed.group('line'),
        'tag': parsed.group('tag'),
        'content': parsed.group('content')
    }
## ============================================= ##
## For running as a script rather than a library ##
## ============================================= ##
def main(arglist=None):
    """Command-line entry point: parse options, then run the converter."""

    class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter,
                        argparse.RawDescriptionHelpFormatter):
        # show argument defaults AND keep the epilog formatting intact
        pass

    argp = argparse.ArgumentParser(
        formatter_class=HelpFormatter,
        description="Read ODIN text data and output",
        epilog='examples:\n'
               ' odintxt.py --assign-igt-ids by-doc-id original/*.txt\n'
               ' odintxt.py --split-by=language by-lang by-doc-id/*.txt'
    )
    argp.add_argument('-v', '--verbose',
                      action='count', dest='verbosity', default=2,
                      help='increase the verbosity (can be repeated: -vvv)')
    # argp.add_argument('-M', '--file-meta',
    #     choices=('keep', 'discard'), default='discard',
    #     help='how to handle file-level metadata'
    # )
    argp.add_argument('-m', '--igt-meta',
                      choices=('keep', 'discard'), default='discard',
                      help='how to handle igt-level metadata')
    argp.add_argument('--assign-igt-ids',
                      action='store_true',
                      help='assign unique IDs to each IGT')
    argp.add_argument('--first-id',
                      metavar='N', type=int, default=1,
                      help='the index of the first ID')
    argp.add_argument('-s', '--split-by',
                      choices=('doc_id', 'iso-639-3'), default='doc_id',
                      help='group IGTs by their doc_id|language')
    argp.add_argument('outdir', help='the directory for output files')
    argp.add_argument('infiles',
                      nargs='*',
                      help='the ODIN text files to read (if none, read from stdin)')
    args = argp.parse_args(arglist)
    args.file_meta = 'discard' # remove if --file-meta is enabled above
    # verbosity 2 (default) -> WARNING; each -v lowers the threshold by 10
    logging.basicConfig(level=50-(args.verbosity*10))
    run(args)
class _BufferedIGTWriter(object):
    """
    Accumulate IGTs per output key and write them out in batches, so the
    destination file is not reopened for every single IGT.
    """

    def __init__(self, outdir):
        self.outdir = outdir
        self.cache = defaultdict(list)

    def write(self, key, igt):
        """Queue *igt* under *key*; flush that key once its batch is full."""
        bucket = self.cache[key]
        bucket.append(igt)
        if len(bucket) >= buffer_size:
            self.flush(key)

    def flush(self, key=None):
        """Append one key's buffered IGTs to its file (all keys if None)."""
        keys = list(self.cache.keys()) if key is None else [key]
        for k in keys:
            # ':' is not filesystem-safe everywhere; map it to '-'
            fname = k.replace(':', '-') + '.txt'
            with open(os.path.join(self.outdir, fname), 'a') as out:
                for igt in self.cache[k]:
                    print(format_odin_igt(igt), file=out, end='\n\n')
            del self.cache[k]
def format_odin_igt(igt):
    """Render one IGT dict back into its ODIN text representation."""
    # the first line carries an igt_id only when one has been assigned
    if 'igt_id' in igt:
        header_tpl = 'doc_id={doc_id} igt_id={igt_id} {line_range} {line_types}'
    else:
        header_tpl = 'doc_id={doc_id} {line_range} {line_types}'
    out = [header_tpl.format(**igt),
           'language: {language} ({iso-639-3})'.format(**igt)]
    # keep remaining header metadata, but never duplicate the language line
    out.extend(hl for hl in igt['header_lines']
               if not hl.startswith('language:'))
    out.extend('line={line} tag={tag}:{content}'.format(**ld)
               for ld in igt['lines'])
    return '\n'.join(out)
def run(args):
    """Create the output directory if needed and convert every input."""
    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)  # may still raise OSError on a creation race
    writer = _BufferedIGTWriter(args.outdir)
    if not args.infiles:
        # no files given: consume standard input instead
        process(sys.stdin, writer, args)
    else:
        for infile in args.infiles:
            with open(infile, 'r') as stream:
                process(stream, writer, args)
    # push out whatever is still buffered for any key
    writer.flush()
def process(f, writer, args):
    """Stream IGT blocks from *f* into *writer*, applying the CLI options."""
    for idx, igt in enumerate(odin_blocks(f)):
        if args.assign_igt_ids:
            # sequential file-wide index, offset by --first-id
            igt['igt_id'] = 'igt{}-{}'.format(igt['doc_id'],
                                              args.first_id + idx)
        if args.igt_meta == 'discard':
            igt['header_lines'] = []
        writer.write(igt.get(args.split_by, default_split_key), igt)
# Invoke the command-line interface only when executed as a script.
if __name__ == '__main__':
    main()
| |
# Natural Language Toolkit: Semantic Interpretation
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
#
# Copyright (C) 2001-2011 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility functions for batch-processing sentences: parsing and
extraction of the semantic representation of the root node of the
syntax tree, followed by evaluation of the semantic representation in
a first-order model.
"""
import evaluate
import re
import nltk
from nltk.sem.logic import *
##############################################################
## Utility functions for connecting parse output to semantics
##############################################################
def batch_parse(inputs, grammar, trace=0):
    """
    Convert input sentences into syntactic trees.

    @parameter inputs: sentences to be parsed
    @type inputs: C{list} of C{str}
    @parameter grammar: L{FeatureGrammar} or name of feature-based grammar
    @rtype: C{list}
    @return: one list of parse L{Tree}s per input sentence
    """
    # accept either an in-memory grammar or a grammar resource name
    if isinstance(grammar, nltk.grammar.FeatureGrammar):
        parser = nltk.parse.FeatureChartParser(grammar)
    else:
        parser = nltk.parse.load_parser(grammar, trace=trace)
    # NOTE(review): whitespace split stands in for a real tokenizer
    return [parser.nbest_parse(sent.split()) for sent in inputs]
def root_semrep(syntree, semkey='SEM'):
    """
    Find the semantic representation at the root of a tree.

    @parameter syntree: a parse L{Tree}
    @parameter semkey: the feature label to use for the root semantics in the tree
    @return: the semantic representation at the root of a L{Tree}
    @rtype: L{logic.Expression}
    @raise KeyError: if the root node has no *semkey* feature
    """
    node = syntree.node
    # the root label must be a feature structure for the semkey lookup to work
    assert isinstance(node, nltk.grammar.FeatStructNonterminal)
    try:
        return node[semkey]
    except KeyError:
        # report the offending node before re-raising (Python 2 print)
        print node,
        print "has no specification for the feature %s" % semkey
    raise
def batch_interpret(inputs, grammar, semkey='SEM', trace=0):
    """
    Add the semantic representation to each syntactic parse tree
    of each input sentence.

    @parameter inputs: a list of sentences
    @parameter grammar: L{FeatureGrammar} or name of feature-based grammar
    @return: for each sentence, a list of (parse-tree, semantic-representation) pairs
    @rtype: C{list}
    """
    results = []
    for syntrees in batch_parse(inputs, grammar, trace=trace):
        results.append([(tree, root_semrep(tree, semkey))
                        for tree in syntrees])
    return results
def batch_evaluate(inputs, grammar, model, assignment, trace=0):
    """
    Add the truth-in-a-model value to each semantic representation
    for each syntactic parse of each input sentence.

    @parameter inputs: a list of sentences
    @parameter grammar: L{FeatureGrammar} or name of feature-based grammar
    @return: for each sentence, a list of
        (parse-tree, semantic-representation, evaluation-in-model) triples
    @rtype: C{list}
    """
    evaluated = []
    for interpretations in batch_interpret(inputs, grammar):
        evaluated.append([
            (syn, sem, model.evaluate(str(sem), assignment, trace=trace))
            for (syn, sem) in interpretations
        ])
    return evaluated
##########################################
# REs used by the parse_valuation function
##########################################
# '=>' (with any number of '=') separates symbol from value
_VAL_SPLIT_RE = re.compile(r'\s*=+>\s*')
# commas separate set elements and tuple members
_ELEMENT_SPLIT_RE = re.compile(r'\s*,\s*')
_TUPLES_RE = re.compile(r"""\s*
    (\([^)]+\))  # tuple-expression
    \s*""", re.VERBOSE)

def parse_valuation_line(s):
    """
    Parse a line in a valuation file.

    Lines are expected to be of the form::

        noosa => n
        girl => {g1, g2}
        chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}

    @parameter s: input line
    @type s: C{str}
    @return: a pair (symbol, value)
    @rtype: C{tuple}
    """
    pieces = _VAL_SPLIT_RE.split(s)
    symbol, value = pieces[0], pieces[1]
    # a braced value denotes a set: of tuples, or of plain individuals
    if value.startswith('{'):
        inner = value[1:-1]
        tuple_strings = _TUPLES_RE.findall(inner)
        if tuple_strings:
            elements = [tuple(_ELEMENT_SPLIT_RE.split(ts[1:-1]))
                        for ts in tuple_strings]
        else:
            elements = _ELEMENT_SPLIT_RE.split(inner)
        value = set(elements)
    return symbol, value
def parse_valuation(s):
    """
    Convert a valuation file into a valuation.

    @parameter s: the contents of a valuation file
    @type s: C{str}
    @return: a L{nltk.sem} valuation
    @rtype: L{Valuation}
    @raise ValueError: if a non-blank, non-comment line cannot be parsed
    """
    statements = []
    for linenum, line in enumerate(s.splitlines()):
        line = line.strip()
        # skip comments and blank lines
        if line.startswith('#') or line == '':
            continue
        try:
            statements.append(parse_valuation_line(line))
        except ValueError:
            # FIX: the original used the Python-2-only statement form
            # 'raise ValueError, msg'; the call form below is equivalent
            # and valid in both Python 2 and 3.
            raise ValueError('Unable to parse line %s: %s' % (linenum, line))
    val = evaluate.Valuation(statements)
    return val
def parse_logic(s, logic_parser=None):
    """
    Convert a file of First Order Formulas into a list of {Expression}s.

    @param s: the contents of the file
    @type s: C{str}
    @param logic_parser: The parser to be used to parse the logical expression
    @type logic_parser: C{LogicParser}
    @return: a list of parsed formulas.
    @rtype: C{list} of L{Expression}
    @raise ValueError: if a non-blank, non-comment line cannot be parsed
    """
    if logic_parser is None:
        logic_parser = LogicParser()
    statements = []
    for linenum, line in enumerate(s.splitlines()):
        line = line.strip()
        # skip comments and blank lines
        if line.startswith('#') or line == '':
            continue
        try:
            statements.append(logic_parser.parse(line))
        except ParseException:
            # FIX: the original used the Python-2-only statement form
            # 'raise ValueError, msg'; the call form below is equivalent
            # and valid in both Python 2 and 3.
            raise ValueError('Unable to parse line %s: %s' % (linenum, line))
    return statements
def skolemize(expression, univ_scope=None, used_variables=None):
    """
    Skolemize the expression and convert to conjunctive normal form (CNF).

    @param expression: the logic C{Expression} to transform
    @param univ_scope: set of universally quantified variables currently in
        scope; existentials under them become Skolem functions of these
    @param used_variables: variables already consumed, so fresh replacements
        stay unique
    @return: an equivalent quantifier-free expression in CNF
    """
    if univ_scope is None:
        univ_scope = set()
    if used_variables is None:
        used_variables = set()
    if isinstance(expression, AllExpression):
        # universal: recurse with the variable in scope, then rename it fresh
        term = skolemize(expression.term, univ_scope|set([expression.variable]), used_variables|set([expression.variable]))
        return term.replace(expression.variable, VariableExpression(unique_variable(ignore=used_variables)))
    elif isinstance(expression, AndExpression):
        return skolemize(expression.first, univ_scope, used_variables) &\
               skolemize(expression.second, univ_scope, used_variables)
    elif isinstance(expression, OrExpression):
        # disjunction: CNF-distribute over the skolemized operands
        return to_cnf(skolemize(expression.first, univ_scope, used_variables),
                      skolemize(expression.second, univ_scope, used_variables))
    elif isinstance(expression, ImpExpression):
        # A -> B  ==  -A | B
        return to_cnf(skolemize(-expression.first, univ_scope, used_variables),
                      skolemize(expression.second, univ_scope, used_variables))
    elif isinstance(expression, IffExpression):
        # A <-> B  ==  (-A | B) & (A | -B)
        return to_cnf(skolemize(-expression.first, univ_scope, used_variables),
                      skolemize(expression.second, univ_scope, used_variables)) &\
               to_cnf(skolemize(expression.first, univ_scope, used_variables),
                      skolemize(-expression.second, univ_scope, used_variables))
    elif isinstance(expression, EqualityExpression):
        return expression
    elif isinstance(expression, NegatedExpression):
        # push the negation inward over the inner connective
        negated = expression.term
        if isinstance(negated, AllExpression):
            # -all x.P  ==  exists x.-P : introduce a Skolem term for x
            term = skolemize(-negated.term, univ_scope, used_variables|set([negated.variable]))
            if univ_scope:
                return term.replace(negated.variable, skolem_function(univ_scope))
            else:
                skolem_constant = VariableExpression(unique_variable(ignore=used_variables))
                return term.replace(negated.variable, skolem_constant)
        elif isinstance(negated, AndExpression):
            # De Morgan: -(A & B) == -A | -B, then distribute into CNF
            return to_cnf(skolemize(-negated.first, univ_scope, used_variables),
                          skolemize(-negated.second, univ_scope, used_variables))
        elif isinstance(negated, OrExpression):
            # De Morgan: -(A | B) == -A & -B
            return skolemize(-negated.first, univ_scope, used_variables) &\
                   skolemize(-negated.second, univ_scope, used_variables)
        elif isinstance(negated, ImpExpression):
            # -(A -> B) == A & -B
            return skolemize(negated.first, univ_scope, used_variables) &\
                   skolemize(-negated.second, univ_scope, used_variables)
        elif isinstance(negated, IffExpression):
            # -(A <-> B) == (-A | -B) & (A | B)
            return to_cnf(skolemize(-negated.first, univ_scope, used_variables),
                          skolemize(-negated.second, univ_scope, used_variables)) &\
                   to_cnf(skolemize(negated.first, univ_scope, used_variables),
                          skolemize(negated.second, univ_scope, used_variables))
        elif isinstance(negated, EqualityExpression):
            return expression
        elif isinstance(negated, NegatedExpression):
            # double negation elimination
            return skolemize(negated.term, univ_scope, used_variables)
        elif isinstance(negated, ExistsExpression):
            # -exists x.P  ==  all x.-P : rename the now-universal variable
            term = skolemize(-negated.term, univ_scope|set([negated.variable]), used_variables|set([negated.variable]))
            return term.replace(negated.variable, VariableExpression(unique_variable(ignore=used_variables)))
        elif isinstance(negated, ApplicationExpression):
            return expression
        else:
            raise Exception('\'%s\' cannot be skolemized' % expression)
    elif isinstance(expression, ExistsExpression):
        # existential: Skolem function of the universal scope, or a fresh
        # Skolem constant when no universals are in scope
        term = skolemize(expression.term, univ_scope, used_variables|set([expression.variable]))
        if univ_scope:
            return term.replace(expression.variable, skolem_function(univ_scope))
        else:
            skolem_constant = VariableExpression(unique_variable(ignore=used_variables))
            return term.replace(expression.variable, skolem_constant)
    elif isinstance(expression, ApplicationExpression):
        return expression
    else:
        raise Exception('\'%s\' cannot be skolemized' % expression)
def to_cnf(first, second):
    """
    Convert this split disjunction to conjunctive normal form (CNF).

    Distributes the disjunction over any conjunction found in either
    operand; otherwise simply joins the two operands with '|'.
    """
    if isinstance(first, AndExpression):
        # (A & B) | C  ==>  (A | C) & (B | C)
        return to_cnf(first.first, second) & to_cnf(first.second, second)
    if isinstance(second, AndExpression):
        # A | (B & C)  ==>  (A | B) & (A | C)
        return to_cnf(first, second.first) & to_cnf(first, second.second)
    return first | second
def demo_model0():
    """Build the demo model ``m0`` and assignment ``g0`` as module globals."""
    global m0, g0
    # Valuation of the non-logical constants: individuals first, then the
    # unary predicates, then the binary relations.
    v = [('john', 'b1'),
         ('mary', 'g1'),
         ('suzie', 'g2'),
         ('fido', 'd1'),
         ('tess', 'd2'),
         ('noosa', 'n'),
         ('girl', set(['g1', 'g2'])),
         ('boy', set(['b1', 'b2'])),
         ('dog', set(['d1', 'd2'])),
         ('bark', set(['d1', 'd2'])),
         ('walk', set(['b1', 'g2', 'd1'])),
         ('chase', set([('b1', 'g1'), ('b2', 'g1'), ('g1', 'd1'), ('g2', 'd2')])),
         ('see', set([('b1', 'g1'), ('b2', 'd2'), ('g1', 'b1'), ('d2', 'b1'), ('g2', 'n')])),
         ('in', set([('b1', 'n'), ('b2', 'n'), ('d2', 'n')])),
         ('with', set([('b1', 'g1'), ('g1', 'b1'), ('d1', 'b1'), ('b1', 'd1')]))
         ]
    val = evaluate.Valuation(v)
    # the model's domain is derived from the valuation itself
    dom = val.domain
    m0 = evaluate.Model(dom, val)
    g0 = evaluate.Assignment(dom)
def read_sents(file):
    """Read test sentences from *file*, one per line.

    Blank lines and lines starting with '#' are skipped.

    @param file: path of the sentence file
    @return: a list of sentence strings
    """
    # FIX: the original iterated over open(file) without ever closing the
    # handle; a with-block guarantees deterministic cleanup.
    with open(file) as f:
        sents = [l.rstrip() for l in f]
    # drop blank lines and comment lines
    return [l for l in sents if len(l) > 0 and l[0] != '#']
def demo_legacy_grammar():
    """
    Check that batch_interpret() is compatible with legacy grammars that use
    a lowercase 'sem' feature.

    Builds a one-rule grammar inline and prints the semantics obtained for
    the single sentence 'hello'.  (Python 2 only: uses print statements.)
    """
    g = nltk.parse_fcfg("""
% start S
S[sem=<hello>] -> 'hello'
""")
    print "Reading grammar: %s" % g
    print "*" * 20
    for reading in batch_interpret(['hello'], g, semkey='sem'):
        # each reading is a list of (tree, semantics) pairs; take the first
        syn, sem = reading[0]
        print
        print "output: ", sem
def demo():
    """Parse and (optionally) evaluate a batch of demo sentences.

    Command-line options select the grammar, model, sentence file and
    tracing levels; results are printed one sentence at a time.
    (Python 2 only: print and exec statements.)
    """
    import sys
    from optparse import OptionParser
    description = \
    """
    Parse and evaluate some sentences.
    """
    opts = OptionParser(description=description)
    opts.set_defaults(evaluate=True, beta=True, syntrace=0,
                      semtrace=0, demo='default', grammar='', sentences='')
    opts.add_option("-d", "--demo", dest="demo",
                    help="choose demo D; omit this for the default demo, or specify 'chat80'", metavar="D")
    opts.add_option("-g", "--gram", dest="grammar",
                    help="read in grammar G", metavar="G")
    opts.add_option("-m", "--model", dest="model",
                    help="import model M (omit '.py' suffix)", metavar="M")
    opts.add_option("-s", "--sentences", dest="sentences",
                    help="read in a file of test sentences S", metavar="S")
    opts.add_option("-e", "--no-eval", action="store_false", dest="evaluate",
                    help="just do a syntactic analysis")
    opts.add_option("-b", "--no-beta-reduction", action="store_false",
                    dest="beta", help="don't carry out beta-reduction")
    opts.add_option("-t", "--syntrace", action="count", dest="syntrace",
                    help="set syntactic tracing on; requires '-e' option")
    opts.add_option("-T", "--semtrace", action="count", dest="semtrace",
                    help="set semantic tracing on")
    (options, args) = opts.parse_args()
    SPACER = '-' * 30
    # build the default model/assignment globals m0 and g0
    demo_model0()
    sents = [
        'Fido sees a boy with Mary',
        'John sees Mary',
        'every girl chases a dog',
        'every boy chases a girl',
        'John walks with a girl in Noosa',
        'who walks']
    gramfile = 'grammars/sample_grammars/sem2.fcfg'
    if options.sentences:
        sentsfile = options.sentences
    if options.grammar:
        gramfile = options.grammar
    if options.model:
        # Python 2 exec statement; imports the named model module
        exec "import %s as model" % options.model
    # NOTE(review): sents is always the list above, so this branch is dead
    # and sentsfile may be unbound if it ever ran -- confirm intent
    if sents is None:
        sents = read_sents(sentsfile)
    # Set model and assignment (any module imported above is shadowed here)
    model = m0
    g = g0
    if options.evaluate:
        evaluations = \
            batch_evaluate(sents, gramfile, model, g, trace=options.semtrace)
    else:
        semreps = \
            batch_interpret(sents, gramfile, trace=options.syntrace)
    for i, sent in enumerate(sents):
        n = 1
        print '\nSentence: %s' % sent
        print SPACER
        if options.evaluate:
            for (syntree, semrep, value) in evaluations[i]:
                if isinstance(value, dict):
                    # satisfier dicts print more readably as sets of keys
                    value = set(value.keys())
                print '%d: %s' % (n, semrep)
                print value
                n += 1
        else:
            for (syntree, semrep) in semreps[i]:
                print '%d: %s' % (n, semrep)
                n += 1
if __name__ == "__main__":
    # The full interactive demo is disabled; run the legacy-grammar
    # compatibility check instead.
    #demo()
    demo_legacy_grammar()
| |
# coding: utf-8
from __future__ import unicode_literals
from contextlib import contextmanager
from ctypes import c_int, c_uint, c_char, c_char_p, pointer, POINTER, byref, Structure, sizeof
import os
from Queue import Queue
import sys
import subprocess
from threading import Thread
from time import sleep
from uuid import uuid4
if sys.platform == 'darwin':
from ctypes import cdll as dll, CFUNCTYPE as functype
else:
from ctypes import windll as dll, WINFUNCTYPE as functype
from box import Box
class ComThread(Thread):
    """Thread that initializes COM on Windows before running its target.

    On win32 the EDSDK callbacks require the thread to have called
    pythoncom.CoInitialize(); on other platforms the wrapper just runs
    the target directly.
    """

    def __init__(self, target=None, **kwargs):
        def _start():
            if sys.platform == 'win32':
                import pythoncom
                pythoncom.CoInitialize()
            # FIX: the original called target() unconditionally, so a
            # ComThread created without a target crashed its thread with
            # TypeError (plain Thread tolerates target=None).
            if target is not None:
                target()
        super(ComThread, self).__init__(target=_start, **kwargs)
class EdsDirectoryItemInfo(Structure):
    """ctypes mirror of the EDSDK ``EdsDirectoryItemInfo`` C struct.

    Field order and types must match the SDK's C layout exactly; do not
    reorder or retype these entries.
    """
    _fields_ = [
        ('Size', c_uint),              # item size in bytes
        ('isFolder', c_int),           # nonzero when the item is a folder
        ('GroupID', c_uint),
        ('Option', c_uint),
        ('szFileName', c_char * 256),  # NUL-terminated file name
        ('format', c_uint),
        ('dateTime', c_uint),
    ]
class EdsCapacity(Structure):
    """ctypes mirror of the EDSDK ``EdsCapacity`` C struct.

    Passed to EdsSetCapacity to tell the camera how much host-side
    storage is available.  Field order is ABI-critical.
    """
    _fields_ = [
        ('NumberOfFreeClusters', c_int),
        ('BytesPerSector', c_int),
        ('Reset', c_int),
    ]
class Camera(object):
    """Driver for a Canon camera through the EDSDK native library.

    Owns the SDK handle, a daemon thread that uploads captured photos to
    Box, and a keep-alive thread that stops the camera from powering down
    while a session is open.  Every Eds* call returns an error code where
    0 means success; codes are printed for debugging throughout.
    (Python 2 only: print statements, Queue module.)
    """

    # EDSDK constants; values come from the EDSDK C headers.
    OBJECT_EVENT_ALL = 0x200
    PROP_SAVE_TO = 0xb
    PROP_VAL_SAVE_TO_PC = 2
    DIR_ITEM_CONTEXT_CHANGED = 0x00000208
    PROP_EVENT_ALL = 0x100
    PROP_EVENT_PROP_CHANGED = 0x101
    PROP_EVF_MODE = 0x501

    def __init__(self):
        self._create_sdk()
        self._filename = None
        self._camera = None
        self._box = Box()
        # photos queued here are uploaded by the daemon thread below
        self._photo_queue = Queue()
        self._photo_thread = Thread(target=self._process_queue)
        self._photo_thread.daemon = True
        self._photo_thread.start()
        self._name = None
        self._message = None
        self._photos = []
        # handshake fields between shoot() and the run() event callbacks
        self._waiting_for_callback = False
        self._event_object = None
        self._no_shutdown_thread = None
        self._stop_no_shutdown_thread = False

    def _create_sdk(self):
        """Load the platform-specific EDSDK shared library."""
        if sys.platform == 'darwin':
            library_path = ('edsdk', 'EDSDK', 'Frameworks', 'EDSDK.Framework', 'Versions', 'A', 'EDSDK')
        else:
            library_path = ('Windows', 'EDSDK', 'Dll', 'EDSDK.dll')
        # paths are relative to the current working directory
        self._sdk = dll.LoadLibrary(os.path.join(os.getcwd(), *library_path))

    def _process_queue(self):
        """Daemon loop: upload queued photos to Box, ignoring all failures."""
        while True:
            try:
                self._box.upload_photo(*self._photo_queue.get())
            except:
                # best-effort upload: deliberately swallow every error so
                # the loop keeps consuming the queue
                pass

    @contextmanager
    def _initialized_sdk(self):
        """Bracket the block with EdsInitializeSDK / EdsTerminateSDK."""
        initialize_error = self._sdk.EdsInitializeSDK()
        print 'initialize', initialize_error
        if initialize_error:
            raise RuntimeError('Could not inititalize SDK.')
        try:
            yield
        finally:
            print 'terminate', self._sdk.EdsTerminateSDK()

    @contextmanager
    def _camera_session(self):
        """Open a session on the first detected camera and lock its UI.

        Yields the camera handle; on exit unlocks the UI, closes the
        session and signals the keep-alive thread to stop.
        """
        camera_list_ref = c_int()
        camera_list_error = self._sdk.EdsGetCameraList(byref(camera_list_ref))
        print 'get list', camera_list_error
        self._camera = c_int()
        camera_error = self._sdk.EdsGetChildAtIndex(camera_list_ref, 0, byref(self._camera))
        print 'get camera', camera_error
        print self._camera
        session_error = self._sdk.EdsOpenSession(self._camera)
        print 'open session', session_error
        # keep-alive: periodically extend the camera's auto-shutdown timer
        self._shutdown_thread = ComThread(target=self._extend_shutdown)
        self._shutdown_thread.daemon = True
        self._shutdown_thread.start()
        ui_lock_error = self._sdk.EdsSendStatusCommand(self._camera, 0, 0)
        print 'lock ui', ui_lock_error
        try:
            yield self._camera
        finally:
            ui_unlock_error = self._sdk.EdsSendStatusCommand(self._camera, 1, 0)
            print 'unlock ui', ui_unlock_error
            close_session_error = self._sdk.EdsCloseSession(self._camera)
            print 'close session', close_session_error
            self._camera = None
            # flag polled by _extend_shutdown's loop
            self._no_shutdown_thread = True
            self._shutdown_thread = None

    def _extend_shutdown(self):
        """Keep-alive loop: send the extend-shutdown command every minute."""
        while not self._no_shutdown_thread:
            sleep(60)
            try:
                self._sdk.EdsSendCommand(self._camera, 0x01, 0)
            except:
                # camera may already be gone; ignore and keep looping
                pass
        self._no_shutdown_thread = False

    @contextmanager
    def live_view(self):
        """Enable live view (EVF) streamed to the PC; yields the EVF image ref.

        On exit releases the image/stream refs and hands the EVF back to
        the camera's TFT display.
        """
        #EdsCreateEvfImageRef, EdsDownloadEvfImage, kEdsCameraCommand_DoEvfAf
        size = sizeof(c_int)
        evf_on_error = self._sdk.EdsSetPropertyData(self._camera, 0x00000501, 0, size, pointer(c_int(1)))
        print 'evf on', evf_on_error # Turn on EVF
        evf_pc_error = self._sdk.EdsSetPropertyData(self._camera, 0x00000500, 0, size, pointer(c_int(2)))
        print 'evf pc', evf_pc_error # Set EVF device to PC
        af_live_face_error = self._sdk.EdsSetPropertyData(self._camera, 0x0000050E, 0, size, pointer(c_int(2)))
        print 'evf af live face', af_live_face_error # Set AF Mode to live face
        stream = c_int()
        # live-view frames are always written to evf/evf.jpg
        sys_path = os.path.abspath(os.path.join('evf', 'evf.jpg'))
        sys_path_p = c_char_p()
        sys_path_p.value = sys_path
        create_stream_error = self._sdk.EdsCreateFileStream(sys_path_p, 1, 2, byref(stream))
        print 'create stream', create_stream_error
        #self._sdk.EdsCreateMemoryStream(0, byref(memory_stream))
        evf_image = c_int()
        create_image_ref_error = self._sdk.EdsCreateEvfImageRef(stream, byref(evf_image))
        print 'create image ref', create_image_ref_error
        # NOTE(review): the cleanup below is not in a finally block, so the
        # SDK refs leak if the with-body raises -- confirm this is acceptable
        yield evf_image
        release_error = self._sdk.EdsRelease(evf_image)
        print 'release image', release_error
        release_error = self._sdk.EdsRelease(stream)
        print 'release stream', release_error
        evf_tft_error = self._sdk.EdsSetPropertyData(self._camera, 0x00000500, 0, size, pointer(c_int(1)))
        print 'evf tft', evf_tft_error # Set EVF device to TFT
        evf_off_error = self._sdk.EdsSetPropertyData(self._camera, 0x00000501, 0, size, pointer(c_int(0)))
        print 'evf off', evf_off_error # Turn off EVF

    def get_evf_frame(self, evf_image):
        """Block until a live-view frame downloads, then return its path.

        EdsDownloadEvfImage writes the JPEG into evf/evf.jpg through the
        file stream created in live_view(); retry until the SDK reports
        success (error code 0).
        """
        while True:
            download_evf_image_error = self._sdk.EdsDownloadEvfImage(self._camera, evf_image)
            error = download_evf_image_error
            print 'download image', error
            if not error:
                break
        sys_path = os.path.abspath(os.path.join('evf', 'evf.jpg'))
        return sys_path

    def shoot(self, name, message):
        """Take a photo, download it from the camera and queue it for upload.

        Blocks until the object-event callback registered in run() reports
        the new directory item.  Returns the number of photos taken so far.
        """
        self._name = name
        self._message = message
        # retry the shutter commands until the SDK accepts them
        shutter_down_error = 1
        while shutter_down_error:
            shutter_down_error = self._sdk.EdsSendCommand(self._camera, 0x00000004, 3)
            print 'shutter down', shutter_down_error # Press shutter button completely
        shutter_up_error = 1
        while shutter_up_error:
            shutter_up_error = self._sdk.EdsSendCommand(self._camera, 0x00000004, 0)
            print 'shutter up', shutter_up_error # Press shutter button off
        # pump SDK events until object_callback stores the new item in
        # self._event_object and clears the flag
        self._waiting_for_callback = True
        while self._waiting_for_callback:
            print 'get event', self._sdk.EdsGetEvent()
            sleep(.2)
        dir_info = EdsDirectoryItemInfo()
        get_directory_item_info_error = self._sdk.EdsGetDirectoryItemInfo(self._event_object, pointer(dir_info))
        print 'get dir info', get_directory_item_info_error
        stream = c_int()
        # prefix with a random hex id so repeated shots never collide
        self._filename = uuid4().hex + dir_info.szFileName
        print self._filename
        sys_path = os.path.abspath(os.path.join('image', self._filename))
        print sys_path
        sys_path_p = c_char_p()
        sys_path_p.value = sys_path
        file_stream_error = self._sdk.EdsCreateFileStream(sys_path_p, 1, 2, byref(stream))
        print 'create file stream', file_stream_error
        download_error = self._sdk.EdsDownload(self._event_object, dir_info.Size, stream)
        print 'download', download_error
        #sleep(2)
        download_complete_error = self._sdk.EdsDownloadComplete(self._event_object)
        print 'dl complete', download_complete_error
        release_error = self._sdk.EdsRelease(self._event_object)
        print 'release dir info', release_error
        self._event_object = None
        release_error = self._sdk.EdsRelease(stream)
        print 'release stream', release_error
        photo_info = (self._name, self._message, sys_path)
        self._photo_queue.put(photo_info)
        self._photos.append(photo_info)
        return len(self._photos)

    @property
    def filename(self):
        # name of the most recently captured file
        return self._filename

    @property
    def photos(self):
        # (name, message, path) tuples for every shot taken this run
        return self._photos

    @contextmanager
    def run(self):
        """Top-level session: initialize the SDK, open the camera, register
        the event handlers and configure saving to the PC, then yield."""
        def object_callback(event_type, object_ref, _):
            print 'got object callback', event_type, object_ref
            if event_type == self.DIR_ITEM_CONTEXT_CHANGED:
                print 'got dir item context changed callback'
                # shoot() polls these two fields
                self._waiting_for_callback = False
                self._event_object = object_ref
            return 0
        def property_callback(event_type, property_id, param, _):
            print 'got property callback', event_type, property_id, param
            if property_id == self.PROP_EVF_MODE:
                self._waiting_for_callback = False
        with self._initialized_sdk():
            with self._camera_session() as camera:
                object_callback_type = functype(c_uint, c_uint, POINTER(c_int), POINTER(c_int))
                # rebinding keeps a reference so the ctypes callback object
                # is not garbage collected while the SDK still uses it
                object_callback = object_callback_type(object_callback)
                object_error = self._sdk.EdsSetObjectEventHandler(camera, self.OBJECT_EVENT_ALL, object_callback, None)
                print 'set object handler', object_error
                property_callback_type = functype(c_uint, c_uint, c_uint, c_uint, POINTER(c_int))
                property_callback = property_callback_type(property_callback)
                size = sizeof(c_int)
                # save captures to the PC instead of the memory card
                save_to_pc_error = self._sdk.EdsSetPropertyData(camera, self.PROP_SAVE_TO, 0, size, pointer(c_int(2)))
                print 'set save to pc', save_to_pc_error
                capacity_error = self._sdk.EdsSetCapacity(camera, EdsCapacity(0x7fffffff, 0x1000, 1))
                print 'set capacity', capacity_error
                yield
class TestCamera(Camera):
    """Stub camera that serves canned images instead of driving hardware."""

    def __init__(self):
        # deliberately skip Camera.__init__: no SDK load, no upload thread
        self._test_images = ['image/video-streaming-{}.jpg'.format(n)
                             for n in (1, 2, 3)]
        self._photos = []
        self._filename = 'video-streaming-1.jpg'

    @contextmanager
    def run(self):
        # nothing to set up or tear down
        yield

    @contextmanager
    def live_view(self):
        yield

    def get_evf_frame(self, evf_image):
        import time
        # rotate through the canned frames, switching once per second
        return self._test_images[int(time.time()) % 3]

    def shoot(self, name, message):
        # sentinel: no photo is ever actually taken
        return -1
class MacbookCamera(Camera):
    """Camera backed by the built-in webcam via the ``imagesnap`` CLI tool."""

    def __init__(self):
        super(MacbookCamera, self).__init__()
        self._evf_thread = None

    def _create_sdk(self):
        # no Canon SDK is involved for the built-in webcam
        pass

    @contextmanager
    def run(self):
        # nothing to initialize or release
        yield

    def _evf(self):
        # continuously snapshot into evf/ at ~10 fps
        self._evf_popen = subprocess.Popen(['../imagesnap', '-q', '-t', '0.1'], cwd='evf')

    @contextmanager
    def live_view(self):
        self._evf_thread = Thread(target=self._evf)
        self._evf_thread.start()
        yield
        # stop the snapshotter, then clear out all leftover frames
        self._evf_popen.terminate()
        self._evf_thread.join()
        for leftover in os.listdir('evf'):
            os.remove(os.path.join('evf', leftover))

    def get_evf_frame(self, evf_image):
        """Return the newest snapshot path (None if none yet), pruning old ones."""
        candidates = [name for name in os.listdir('evf')
                      if name.startswith('snapshot')]
        candidates.sort(reverse=True)
        if not candidates:
            return None
        newest = candidates[0]
        for stale in candidates[1:]:
            os.remove(os.path.join('evf', stale))
        return os.path.join('evf', newest)

    def shoot(self, name, message):
        """Capture one webcam photo, queue it for upload, return its index."""
        self._name = name
        self._message = message
        self._filename = 'photobooth-{}.jpg'.format(len(self._photos))
        file_sys_path = os.path.join('image', self._filename)
        subprocess.call(['./imagesnap', '-q', '-w', '1', file_sys_path])
        photo_info = (self._name, self._message, file_sys_path)
        self._photo_queue.put(photo_info)
        self._photos.append(photo_info)
        return len(self._photos) - 1
if __name__ == '__main__':
    from datetime import datetime, timedelta
    # Smoke test: stream live-view frames from a real camera for a few
    # seconds, copying each frame to evf/<n>.jpg.
    camera = Camera()
    count = 1
    with camera.run():
        seconds = 5
        stop_time = datetime.now() + timedelta(seconds=seconds)
        with camera.live_view() as view:
            while datetime.now() < stop_time:
                filename = camera.get_evf_frame(view)
                if filename:
                    with open(filename, 'rb') as evf_file:
                        frame = evf_file.read()
                    # log the frame under an incrementing name
                    with open(os.path.join('evf', str(count) + '.jpg'), 'wb') as evf_log:
                        evf_log.write(frame)
                    count += 1
| |
# coding=utf8
import sys, re, logging, os, random, urllib2, hashlib, time, wave, struct, traceback
import json
from common import VoiceEntry, Song, get_sqla_session, SCRIPT_DIR, MAX_SAMPLE_VAL, iframes_to_frames, frames_to_iframes
from sqlalchemy import or_, not_, null, and_
import numpy as np
import scipy
import scipy.signal as signal
# Module-wide logging setup.
__llevel = logging.DEBUG
logging.basicConfig( format="%(asctime)s [%(levelname)s] %(message)s" )
LOG = logging.getLogger("simple_logger")
LOG.setLevel( __llevel )
# Cryptographically strong RNG, used for voice-entry ids.
RANDOM = random.SystemRandom()
FRAME_CHUNK_SIZE = 1024*10
SOUND_DEPTH = 2 # in bytes
# Directory (relative) where fitted voice-entry WAVs are cached.
FIT_VOICE_ENTRIES_RELPATH = "fit_voice_entries"
# Samples quieter than 5% of full scale count as silence when trimming.
TRIM_THRESHOLD = MAX_SAMPLE_VAL * 0.05
def trim_signal( iframes ):
    """Strip near-silent samples from both ends of *iframes*.

    A sample counts as silence while its magnitude stays below
    TRIM_THRESHOLD.  Returns the trimmed sample list.
    """
    # count leading silent samples
    left = 0
    for sample in iframes:
        if abs(sample) >= TRIM_THRESHOLD:
            break
        left += 1
    left = min( left, len(iframes)-1 )
    iframes = iframes[left:]
    # count trailing silent samples
    right = 0
    for sample in iframes[::-1]:
        if abs(sample) >= TRIM_THRESHOLD:
            break
        right += 1
    right = min( right, len(iframes) )
    # BUG FIX: the original always did iframes[:-right]; with right == 0
    # (last sample already loud) that is iframes[:0] == [], silently
    # discarding the whole signal.  Only slice when there is something
    # to trim.
    if right:
        iframes = iframes[:-right]
    return iframes
def __shift_speed( iframes, orig_len, new_len, srate ):
    """Resample *iframes* to *new_len* samples (changes speed and pitch).

    Returns the input unchanged when the length change is within a 5%
    tolerance, since the resampling cost is not worth such a small shift.
    """
    # BUG FIX: the original tested 0.95 < (new_len // orig_len) < 1.05.
    # Floor division yields only whole numbers, so every shrink floored
    # to 0 (always resampled) and every ratio in [1, 2) floored to 1
    # (never resampled, even far outside the tolerance).  Compare the
    # true float ratio instead.
    if 0.95 < float(new_len) / orig_len < 1.05:
        return iframes
    iframes_nparr = np.array( iframes )
    return list( signal.resample( iframes_nparr, new_len ) )
def __shift_tempo( iframes, orig_len, new_len, srate ):
    """Stretch *iframes* to *new_len* samples by sinusoid resynthesis.

    Takes the FFT, keeps the components with magnitude > 1.0, and sums
    them back over the new sample grid, so duration changes while pitch
    is preserved.  NOTE: Python 2 only (xrange, tuple-parameter lambda,
    and integer division in range(n/2)).
    """
#    if 0.95 < (new_len // orig_len) < 1.05:
#        return iframes
    iframes_nparr = np.array( iframes )
    n = len(iframes)
    # frequency bins of the real FFT; keep only the first n/2 (positive) bins
    freqs = np.fft.rfftfreq( len(iframes), d=(1.0/srate) )[ range(n/2) ]
    Y = scipy.fft( iframes_nparr )
    Y = Y[ range(n/2) ]
    Y_mag = np.absolute( Y )
    Y_phase = np.angle( Y )
    # sample indices of the stretched output signal
    N = np.array( xrange( new_len ) )
    # angular frequency (radians per sample) for each bin
    freq_args = list( freqs * (2 * np.pi / srate) )
    mag_args = list(Y_mag)
    phase_args = list(Y_phase)
    # discard negligible components (Python 2 tuple-parameter lambda)
    sin_args = filter(
        lambda (freq,mag,phase): mag > 1.0,
        zip( freq_args, mag_args, phase_args )
    )
    # resynthesise: sum the surviving sinusoids over the new grid
    new_iframes_nparr = np.zeros( new_len )
    for freq,mag,phase in sin_args:
        new_iframes_nparr += (mag * np.sin( freq*N + phase ))
    new_iframes = list( new_iframes_nparr.astype('int16') )
    return list(new_iframes)

# Strategy used by fit_voice_entries: plain resampling (shifts pitch too).
change_size = __shift_speed
def fit_voice_entries( session, song_title ):
    """Fit every pending voice entry of a song into its time slot.

    Each VoiceEntry of *song_title* whose t_end is NULL is treated as
    un-fitted: its WAV is trimmed of silence, resized so it ends before
    the next entry starts, written (and cached by (source path, target
    length)) under FIT_VOICE_ENTRIES_RELPATH, then re-registered on the
    song with concrete t_start/t_end while the originals are deleted.
    Raises on any WAV that is not mono, 16-bit, 44100 Hz.
    """
    song = session.query( Song ).get( song_title )
    if song is None:
        LOG.error( "song not found!" )
        raise Exception()
    # required audio format for the target song and every voice entry
    nchannels = 1
    srate = 44100
    sdepth = 2
    song_wav = wave.open( song.path, "rb" )
    if song_wav.getnchannels() != nchannels or song_wav.getframerate() != srate or song_wav.getsampwidth() != sdepth:
        LOG.error( "Target song: unsupported format" )
        raise Exception()
    def load_ventry_info( voice_entry ):
        # open an entry's WAV, validate its format and decode its samples
        f = wave.open( voice_entry.path )
        if f.getnchannels() != nchannels or f.getframerate() != srate or f.getsampwidth() != sdepth:
            LOG.error( "Voice entry \"%s\": unsupported format", voice_entry.path )
            raise Exception()
        frames = f.readframes( f.getnframes() )
        iframes = frames_to_iframes( frames )
        return (f, iframes, voice_entry)
    veinfo_by_path = dict()   # path -> (wave file, samples, entry); one load per file
    vepath_by_ifidx = dict()  # start frame index -> source path
    orig_voice_entries = session.query( VoiceEntry ).filter(
        and_( VoiceEntry.song_title==song.title, VoiceEntry.t_end == null() )
    )
    for voice_entry in orig_voice_entries:
        if not voice_entry.path in veinfo_by_path:
            (vewav,vewav_iframes,voice_entry) = load_ventry_info( voice_entry )
            veinfo_by_path[ voice_entry.path ] = (vewav,vewav_iframes,voice_entry)
        ifidx = int( voice_entry.t_start * srate )
        vepath_by_ifidx[ ifidx ] = voice_entry.path
    vepath_by_ifidx_items = sorted( vepath_by_ifidx.items() )
    if len(vepath_by_ifidx_items):
        # each entry may last at most until one frame before the next one
        # starts; the last entry keeps its natural length
        new_len_by_ifidx = dict()
        previous_ifidx = vepath_by_ifidx_items[0][0]
        previous_length = veinfo_by_path[vepath_by_ifidx_items[0][1]][0].getnframes()
        for ifidx,vepath in vepath_by_ifidx_items[1:]:
            prev_max_len = min( (ifidx - previous_ifidx) - 1, previous_length )
            new_len_by_ifidx[previous_ifidx] = prev_max_len
            previous_ifidx = ifidx
            previous_length = veinfo_by_path[vepath][0].getnframes()
        last_ifidx = vepath_by_ifidx_items[-1][0]
        new_len_by_ifidx[ last_ifidx ] = veinfo_by_path[vepath_by_ifidx_items[-1][1]][0].getnframes()
    veinfo_by_origpath_and_len = dict()  # (source path, target len) -> (fitted path, actual len)
    origpath_and_len_by_ifidx = dict()
    for ifidx,vepath in vepath_by_ifidx_items:
        new_len = new_len_by_ifidx[ifidx]
        if new_len < 1:
            # the next entry starts immediately: no room to fit this one
            LOG.error( "\"%s\" IGNORED :( too big or misplaced.", vepath )
            continue
        if not (vepath,new_len) in veinfo_by_origpath_and_len:
            (orig_vewav, orig_vewav_iframes, orig_ventry) = veinfo_by_path[vepath]
            orig_len = orig_vewav.getnframes()
            orig_fname_base = os.path.basename( orig_ventry.path )
            # fitted file name encodes the resize percentage, e.g. foo.wav_85%
            fname_base = "%s_%.0f%%" % (orig_fname_base, 100.0 * new_len/float(orig_len))
            rel_fpath = "%s/%s.wav" % (FIT_VOICE_ENTRIES_RELPATH, fname_base)
            file_exists = os.path.isfile( rel_fpath ) and os.path.getsize( rel_fpath )
            actual_len = None
            if not file_exists:
                LOG.info( "\"%s\" not found, creating it...", fname_base )
                out_wav = wave.open( rel_fpath, "w" )
                out_wav.setnchannels( orig_vewav.getnchannels() )
                out_wav.setsampwidth( orig_vewav.getsampwidth() )
                out_wav.setframerate( orig_vewav.getframerate() )
                # trim silence, then resize to the available slot
                trimmed_frames = trim_signal( orig_vewav_iframes )
                out_iframes = change_size( trimmed_frames, len(trimmed_frames), new_len, srate )
                out_length = len(out_iframes)
                out_frames = iframes_to_frames( out_iframes )
                out_wav.writeframes( out_frames )
                out_wav.close()
                actual_len = out_length
            else:
                # generated on a previous run; just read its length
                LOG.debug( "\"%s\" found.", fname_base )
                wav = wave.open( rel_fpath, "r" )
                actual_len = wav.getnframes()
                wav.close()
            veinfo_by_origpath_and_len[ (orig_ventry.path, new_len) ] = (rel_fpath,actual_len)
        origpath_and_len_by_ifidx[ ifidx ] = (vepath, new_len)
    # register the fitted clips on the song with concrete start/end times
    for ifidx,(orig_path,new_len) in origpath_and_len_by_ifidx.items():
        (rel_fpath,actual_len) = veinfo_by_origpath_and_len[ (orig_path,new_len) ]
        voice_entry = VoiceEntry()
        voice_entry.id = RANDOM.randint( 0, 1 << 32 )
        voice_entry.t_start = ifidx / float(srate)
        voice_entry.t_end = (ifidx + actual_len) / float(srate)
        voice_entry.path = rel_fpath
        song.voice_entries.append( voice_entry )
    for (vewav,vewav_iframes,ventry) in veinfo_by_path.values():
        vewav.close()
    # the un-fitted originals are superseded by the entries created above
    for voice_entry in orig_voice_entries:
        session.delete( voice_entry )
    song_wav.close()
def do_it( song_title ):
    """Fit the voice entries of *song_title* inside one DB transaction.

    Commits on success; on any error prints the traceback and rolls
    back.  The session is always closed.
    """
    session = get_sqla_session()
    try:
        fit_voice_entries( session, song_title )
        session.commit()
    except Exception:
        traceback.print_exc()
        session.rollback()
    finally:
        # FIX: close() was previously outside any finally, so a
        # non-Exception interrupt (e.g. KeyboardInterrupt) or an error in
        # rollback() leaked the session.
        session.close()
# FIX: this used to run unconditionally at import time and crashed with an
# IndexError when no argument was given; guard it and validate argv.
if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.exit("usage: %s <song_title>" % sys.argv[0])
    song_title = sys.argv[1]
    do_it( song_title )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.